/* Public E-Switch API */
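/* ESW_ALLOWED: the eswitch object exists and this function is the
 * eswitch manager for the device.
 */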
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode)
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
struct mlx5_vport *vport;
- int total_nvports = 0;
int err;
int i, enabled_events;
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
- if (mode == MLX5_ESWITCH_OFFLOADS) {
- if (mlx5_core_is_ecpf_esw_manager(esw->dev))
- total_nvports = esw->total_vports;
- else
- total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
- }
-
esw->mode = mode;
mlx5_lag_update(esw->dev);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- err = esw_offloads_init(esw, nvfs, total_nvports);
+ err = esw_offloads_init(esw);
}
if (err)
}
/* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
esw_enable_vport(esw, vport, enabled_events);
if (mode == MLX5_ESWITCH_LEGACY) {
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
- nvfs, esw->enabled_vports);
+ esw->esw_funcs.num_vfs, esw->enabled_vports);
return 0;
esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
- esw->dev->priv.sriov.num_vfs, esw->enabled_vports);
+ esw->esw_funcs.num_vfs, esw->enabled_vports);
mc_promisc = &esw->mc_promisc;
return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
+
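+/* Cache the number of VFs in esw->esw_funcs. When the ECPF is the eswitch
+ * manager, the authoritative count is queried from FW via
+ * QUERY_ESW_FUNCTIONS; otherwise the caller-supplied num_vfs is cached
+ * as-is. Callers must only invoke this while the eswitch is disabled.
+ */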
+void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
+ int err;
+
+ WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ esw->esw_funcs.num_vfs = num_vfs;
+ return;
+ }
+
+ err = mlx5_esw_query_functions(esw->dev, out, sizeof(out));
+ if (!err)
+ esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
+ host_params_context.host_num_of_vfs);
+}
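+
+/* A typical call sequence (sketch, mirroring esw_offloads_start()):
+ *
+ *	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
+ *	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
+ */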
};
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
- int total_nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode);
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN]);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
+static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
return -EOPNOTSUPP;
}
+static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
+
#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
#define FDB_MAX_PRIO 1
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1;
if (esw->mode != MLX5_ESWITCH_LEGACY &&
!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
}
mlx5_eswitch_disable(esw);
- err = mlx5_eswitch_enable(esw, num_vfs, MLX5_ESWITCH_OFFLOADS);
+ mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
+ err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- err1 = mlx5_eswitch_enable(esw, num_vfs, MLX5_ESWITCH_LEGACY);
+ err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to legacy");
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
- num_vfs,
&esw->offloads.inline_mode)) {
esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
NL_SET_ERR_MSG_MOD(extack,
__unload_reps_vf_vport(esw, nvports, rep_type);
}
-static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
- u8 rep_type)
+static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
- __unload_reps_vf_vport(esw, nvports, rep_type);
+ __unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
/* Special vports must be the last to unload. */
__unload_reps_special_vport(esw, rep_type);
}
-static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
+static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
u8 rep_type = NUM_REP_TYPES;
while (rep_type-- > 0)
- __unload_reps_all_vport(esw, nvports, rep_type);
+ __unload_reps_all_vport(esw, rep_type);
}
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
return err;
}
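+/* Load one rep type across all vports: special vports first (the uplink rep
+ * creates mdev resources the VF reps depend on), then the VF reps; unload
+ * the special vports again if loading the VF reps fails.
+ */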
+static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
+{
+ int err;
+
+ /* Special vports must be loaded first; the uplink rep creates mdev resources. */
+ err = __load_reps_special_vport(esw, rep_type);
+ if (err)
+ return err;
+
+ err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
+ if (err)
+ goto err_vfs;
+
+ return 0;
+
+err_vfs:
+ __unload_reps_special_vport(esw, rep_type);
+ return err;
+}
+
static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = 0;
return err;
}
-static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw)
+static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
{
u8 rep_type = 0;
int err;
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
- err = __load_reps_special_vport(esw, rep_type);
+ err = __load_reps_all_vport(esw, rep_type);
if (err)
goto err_reps;
}
err_reps:
while (rep_type-- > 0)
- __unload_reps_special_vport(esw, rep_type);
+ __unload_reps_all_vport(esw, rep_type);
return err;
}
esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
-static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
+static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
+ int num_vfs = esw->esw_funcs.num_vfs;
+ int total_vports;
int err;
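+ /* Size the steering objects for every vport the eswitch may manage:
+ * all vports when the ECPF is the eswitch manager, otherwise the
+ * enabled VFs plus the special vports.
+ */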
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev))
+ total_vports = esw->total_vports;
+ else
+ total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
+
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
if (err)
return err;
- err = esw_create_offloads_fdb_tables(esw, nvports);
+ err = esw_create_offloads_fdb_tables(esw, total_vports);
if (err)
goto create_fdb_err;
- err = esw_create_offloads_table(esw, nvports);
+ err = esw_create_offloads_table(esw, total_vports);
if (err)
goto create_ft_err;
- err = esw_create_vport_rx_group(esw, nvports);
+ err = esw_create_vport_rx_group(esw, total_vports);
if (err)
goto create_fg_err;
kfree(host_work);
}
-static void esw_emulate_event_handler(struct work_struct *work)
-{
- struct mlx5_host_work *host_work =
- container_of(work, struct mlx5_host_work, work);
- struct mlx5_eswitch *esw = host_work->esw;
- int err;
- if (esw->esw_funcs.num_vfs) {
- err = esw_offloads_load_vf_reps(esw, esw->esw_funcs.num_vfs);
- if (err)
- esw_warn(esw->dev, "Load vf reps err=%d\n", err);
- }
- kfree(host_work);
-}
-
-static int esw_functions_changed_event(struct notifier_block *nb,
- unsigned long type, void *data)
+static int
+esw_functions_changed_event(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_esw_functions *esw_funcs;
struct mlx5_host_work *host_work;
host_work->esw = esw;
- if (mlx5_eswitch_is_funcs_handler(esw->dev))
- INIT_WORK(&host_work->work,
- esw_functions_changed_event_handler);
- else
- INIT_WORK(&host_work->work, esw_emulate_event_handler);
+ INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
queue_work(esw->work_queue, &host_work->work);
return NOTIFY_OK;
}
-static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
- u16 vf_nvports)
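+/* The ESW_FUNCTIONS_CHANGED notifier is registered only when FW can raise
+ * the event; otherwise the cached VF count is maintained solely through
+ * mlx5_eswitch_update_num_of_vfs().
+ */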
+static void esw_functions_changed_event_init(struct mlx5_eswitch *esw)
{
if (mlx5_eswitch_is_funcs_handler(esw->dev)) {
- esw->esw_funcs.num_vfs = 0;
MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
ESW_FUNCTIONS_CHANGED);
mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
- } else {
- esw->esw_funcs.num_vfs = vf_nvports;
}
}
flush_workqueue(esw->work_queue);
}
-int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
- int total_nvports)
+int esw_offloads_init(struct mlx5_eswitch *esw)
{
int err;
- err = esw_offloads_steering_init(esw, total_nvports);
+ err = esw_offloads_steering_init(esw);
if (err)
return err;
goto err_vport_metadata;
}
- /* Only load special vports reps. VF reps will be loaded in
- * context of functions_changed event handler through real
- * or emulated event.
- */
- err = esw_offloads_load_special_vport(esw);
+ err = esw_offloads_load_all_reps(esw);
if (err)
goto err_reps;
esw_offloads_devcom_init(esw);
- esw_functions_changed_event_init(esw, vf_nvports);
+ esw_functions_changed_event_init(esw);
mlx5_rdma_enable_roce(esw->dev);
- /* Call esw_functions_changed event to load VF reps:
- * 1. HW does not support the event then emulate it
- * Or
- * 2. The event was already notified when num_vfs changed
- * and eswitch was in legacy mode
- */
- esw_functions_changed_event(&esw->esw_funcs.nb.nb,
- MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED,
- NULL);
-
return 0;
err_reps:
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1;
mlx5_eswitch_disable(esw);
- err = mlx5_eswitch_enable(esw, num_vfs, MLX5_ESWITCH_LEGACY);
+ err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- err1 = mlx5_eswitch_enable(esw, num_vfs, MLX5_ESWITCH_OFFLOADS);
+ err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");
esw_functions_changed_event_cleanup(esw);
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
- esw_offloads_unload_all_reps(esw, esw->esw_funcs.num_vfs);
+ esw_offloads_unload_all_reps(esw);
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
mlx5_eswitch_disable_passing_vport_metadata(esw);
esw_offloads_steering_cleanup(esw);
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
struct mlx5_core_dev *dev = esw->dev;
}
query_vports:
- for (vport = 1; vport <= nvfs; vport++) {
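+ /* Vport 0 is the eswitch manager (PF); VF vports are numbered from 1. */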
+ for (vport = 1; vport <= esw->esw_funcs.num_vfs; vport++) {
mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
if (vport > 1 && prev_mlx5_mode != mlx5_mode)
return -EINVAL;
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
- u16 max_vf = mlx5_core_max_vfs(esw->dev);
struct mlx5_eswitch_rep *rep;
int i;
if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- __unload_reps_all_vport(esw, max_vf, rep_type);
+ __unload_reps_all_vport(esw, rep_type);
mlx5_esw_for_all_reps(esw, i, rep)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);