static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
{
- return bpf_offload_dev_netdev_register(netdev);
+ struct nfp_app_bpf *bpf = app->priv;
+
+ return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
}
static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
{
- bpf_offload_dev_netdev_unregister(netdev);
+ struct nfp_app_bpf *bpf = app->priv;
+
+ bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
}
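
/*
 * Usage sketch, not part of the patch: the hooks above run once per
 * port netdev, all sharing the bpf->bpf_dev created at app init time.
 * This hypothetical helper (illustrative names only) shows a
 * multi-port caller registering every port against the same device
 * and unwinding on failure:
 */
static int nfp_bpf_register_ports(struct nfp_app *app,
				  struct net_device **netdevs,
				  unsigned int n_ports)
{
	unsigned int i;
	int err;

	for (i = 0; i < n_ports; i++) {
		err = nfp_bpf_ndo_init(app, netdevs[i]);
		if (err)
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* uninit the ports registered before the failing one */
	while (i--)
		nfp_bpf_ndo_uninit(app, netdevs[i]);
	return err;
}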
static int nfp_bpf_init(struct nfp_app *app)
if (err)
goto err_free_neutral_maps;
+ bpf->bpf_dev = bpf_offload_dev_create();
+ err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
+ if (err)
+ goto err_free_neutral_maps;
+
return 0;
err_free_neutral_maps:
static void nfp_bpf_clean(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

+	bpf_offload_dev_destroy(bpf->bpf_dev);
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
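
/*
 * Note on the init hunk above (equivalent code only, shown as a
 * sketch): bpf_offload_dev_create() returns an ERR_PTR on failure and
 * never NULL, so PTR_ERR_OR_ZERO() collapses the check; open-coded it
 * would read:
 *
 *	bpf->bpf_dev = bpf_offload_dev_create();
 *	if (IS_ERR(bpf->bpf_dev)) {
 *		err = PTR_ERR(bpf->bpf_dev);
 *		goto err_free_neutral_maps;
 *	}
 */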
int nsim_bpf_init(struct netdevsim *ns)
	ns->sdev->ddir_bpf_bound_progs =
		debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
return -ENOMEM;
+
+ ns->sdev->bpf_dev = bpf_offload_dev_create();
+ err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
+ if (err)
+ return err;
}
- err = bpf_offload_dev_netdev_register(ns->netdev);
+ err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
if (err)
- return err;
+ goto err_destroy_bdev;
debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
&ns->bpf_offloaded_id);
	debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
			    &ns->bpf_map_accept);
return 0;
+
+err_destroy_bdev:
+ if (ns->sdev->refcnt == 1)
+ bpf_offload_dev_destroy(ns->sdev->bpf_dev);
+ return err;
}
void nsim_bpf_uninit(struct netdevsim *ns)
WARN_ON(ns->xdp.prog);
WARN_ON(ns->xdp_hw.prog);
WARN_ON(ns->bpf_offloaded);
- bpf_offload_dev_netdev_unregister(ns->netdev);
+ bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);
if (ns->sdev->refcnt == 1) {
WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
+ bpf_offload_dev_destroy(ns->sdev->bpf_dev);
}
}
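
/*
 * Worked sequence (hypothetical port names): netdevsim shares one
 * sdev between linked devices and sdev->refcnt counts the ports, so
 * the refcnt == 1 checks above make create/destroy run exactly once:
 *
 *	nsim_bpf_init(portA);	// refcnt == 1: bpf_offload_dev_create()
 *	nsim_bpf_init(portB);	// refcnt == 2: reuses sdev->bpf_dev
 *	nsim_bpf_uninit(portB);	// refcnt == 2: unregister netdev only
 *	nsim_bpf_uninit(portA);	// refcnt == 1: bpf_offload_dev_destroy()
 */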
char name[BPF_OBJ_NAME_LEN];
};
+struct bpf_offload_dev;
struct bpf_offloaded_map;
struct bpf_map_dev_ops {
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
-int bpf_offload_dev_netdev_register(struct net_device *netdev);
-void bpf_offload_dev_netdev_unregister(struct net_device *netdev);
+struct bpf_offload_dev *bpf_offload_dev_create(void);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
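
/*
 * Lifecycle sketch distilled from the driver hunks above; struct
 * example_priv and example_probe() are hypothetical, not part of the
 * patch. One bpf_offload_dev per device, one registration per port:
 */
struct example_priv {
	struct bpf_offload_dev *bpf_dev;
	struct net_device *netdev;
};

static int example_probe(struct example_priv *priv)
{
	int err;

	priv->bpf_dev = bpf_offload_dev_create();
	if (IS_ERR(priv->bpf_dev))
		return PTR_ERR(priv->bpf_dev);

	err = bpf_offload_dev_netdev_register(priv->bpf_dev, priv->netdev);
	if (err) {
		bpf_offload_dev_destroy(priv->bpf_dev);
		return err;
	}
	return 0;
}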
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
*/
static DECLARE_RWSEM(bpf_devs_lock);
+struct bpf_offload_dev {
+ struct list_head netdevs;
+};
+
struct bpf_offload_netdev {
struct rhash_head l;
struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
struct list_head progs;
struct list_head maps;
+ struct list_head offdev_netdevs;
};
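
/*
 * Illustration only (hypothetical helper): the rhashtable still maps
 * netdev -> bpf_offload_netdev, while the new offdev_netdevs member
 * threads the same entries onto their device's netdevs list. Walking
 * that list requires bpf_devs_lock:
 */
static unsigned int
bpf_offload_dev_num_netdevs(struct bpf_offload_dev *offdev)
{
	struct bpf_offload_netdev *ondev;
	unsigned int n = 0;

	lockdep_assert_held(&bpf_devs_lock);
	list_for_each_entry(ondev, &offdev->netdevs, offdev_netdevs)
		n++;
	return n;
}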
static const struct rhashtable_params offdevs_params = {
return ret;
}
-int bpf_offload_dev_netdev_register(struct net_device *netdev)
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
{
struct bpf_offload_netdev *ondev;
int err;
- down_write(&bpf_devs_lock);
- if (!offdevs_inited) {
- err = rhashtable_init(&offdevs, &offdevs_params);
- if (err)
- return err;
- offdevs_inited = true;
- }
- up_write(&bpf_devs_lock);
-
ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
if (!ondev)
return -ENOMEM;
ondev->netdev = netdev;
+ ondev->offdev = offdev;
INIT_LIST_HEAD(&ondev->progs);
INIT_LIST_HEAD(&ondev->maps);
goto err_unlock_free;
}
+ list_add(&ondev->offdev_netdevs, &offdev->netdevs);
up_write(&bpf_devs_lock);
return 0;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
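
/*
 * For reference, a sketch of the lookup side (hypothetical name,
 * mirroring the helper this file uses for the ndo callbacks): entries
 * registered above are found again by hashing the netdev pointer,
 * with bpf_devs_lock held.
 */
static struct bpf_offload_netdev *
example_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}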
-void bpf_offload_dev_netdev_unregister(struct net_device *netdev)
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
{
+ struct bpf_offload_netdev *ondev, *altdev;
struct bpf_offloaded_map *offmap, *mtmp;
struct bpf_prog_offload *offload, *ptmp;
- struct bpf_offload_netdev *ondev;
ASSERT_RTNL();
goto unlock;
WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
-
- list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
- __bpf_prog_offload_destroy(offload->prog);
- list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
- __bpf_map_offload_destroy(offmap);
+ list_del(&ondev->offdev_netdevs);
+
+ /* Try to move the objects to another netdev of the device */
+ altdev = list_first_entry_or_null(&offdev->netdevs,
+ struct bpf_offload_netdev,
+ offdev_netdevs);
+ if (altdev) {
+ list_for_each_entry(offload, &ondev->progs, offloads)
+ offload->netdev = altdev->netdev;
+ list_splice_init(&ondev->progs, &altdev->progs);
+
+ list_for_each_entry(offmap, &ondev->maps, offloads)
+ offmap->netdev = altdev->netdev;
+ list_splice_init(&ondev->maps, &altdev->maps);
+ } else {
+ list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
+ __bpf_prog_offload_destroy(offload->prog);
+ list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
+ __bpf_map_offload_destroy(offmap);
+ }
WARN_ON(!list_empty(&ondev->progs));
WARN_ON(!list_empty(&ondev->maps));
up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
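
/*
 * Migration sketch (hypothetical two-port device, names illustrative):
 * since both ports registered with the same offdev, unregistering one
 * re-homes its offloaded objects to the survivor instead of destroying
 * them:
 *
 *	bpf_offload_dev_netdev_register(bdev, port_a);
 *	bpf_offload_dev_netdev_register(bdev, port_b);
 *	... programs/maps offloaded via port_b ...
 *	bpf_offload_dev_netdev_unregister(bdev, port_b);
 *	// offload->netdev for those objects now points at port_a
 */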
+
+struct bpf_offload_dev *bpf_offload_dev_create(void)
+{
+ struct bpf_offload_dev *offdev;
+ int err;
+
+ down_write(&bpf_devs_lock);
+ if (!offdevs_inited) {
+ err = rhashtable_init(&offdevs, &offdevs_params);
+		if (err) {
+			up_write(&bpf_devs_lock);
+			return ERR_PTR(err);
+		}
+ offdevs_inited = true;
+ }
+ up_write(&bpf_devs_lock);
+
+ offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
+ if (!offdev)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&offdev->netdevs);
+
+ return offdev;
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
+
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
+{
+ WARN_ON(!list_empty(&offdev->netdevs));
+ kfree(offdev);
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
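
/*
 * Teardown-order note (sketch): destroy() warns if offdev->netdevs is
 * non-empty, so every registration must be undone first:
 *
 *	bpf_offload_dev_netdev_unregister(bdev, netdev);
 *	bpf_offload_dev_destroy(bdev);	// netdevs list empty, no WARN
 */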