int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
void *type_data, bool err_stop, bool rtnl_held)
{
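+ /* lockeddevcnt is read lockless here as an optimization: the value is
+ * re-checked under cb_lock below, and we retry with rtnl held if a
+ * locked device appeared in the meantime.
+ */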
+ bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
int ok_count;
+retry:
+ if (take_rtnl)
+ rtnl_lock();
down_read(&block->cb_lock);
+ /* Need to obtain rtnl lock if block is bound to devs that require it.
+ * In block bind code cb_lock is obtained while holding rtnl, so we must
+ * obtain the locks in the same order here.
+ */
+ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
+ up_read(&block->cb_lock);
+ take_rtnl = true;
+ goto retry;
+ }
+
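+ /* Both locks are now held in the same order as in the block bind code:
+ * rtnl (when required) first, then cb_lock.
+ */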
ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
+
up_read(&block->cb_lock);
+ if (take_rtnl)
+ rtnl_unlock();
return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
enum tc_setup_type type, void *type_data, bool err_stop,
u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
+ bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
int ok_count;
+retry:
+ if (take_rtnl)
+ rtnl_lock();
down_read(&block->cb_lock);
+ /* Need to obtain rtnl lock if block is bound to devs that require it.
+ * In block bind code cb_lock is obtained while holding rtnl, so we must
+ * obtain the locks in the same order here.
+ */
+ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
+ up_read(&block->cb_lock);
+ take_rtnl = true;
+ goto retry;
+ }
+
/* Make sure all netdevs sharing this block are offload-capable. */
if (block->nooffloaddevcnt && err_stop) {
ok_count = -EOPNOTSUPP;
goto err_unlock;
}

ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
if (ok_count < 0)
goto err_unlock;

if (tp->ops->hw_add)
tp->ops->hw_add(tp, type_data);
if (ok_count > 0)
tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
ok_count, true);
err_unlock:
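+ /* Drop the locks in reverse acquisition order: cb_lock, then rtnl. */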
up_read(&block->cb_lock);
+ if (take_rtnl)
+ rtnl_unlock();
return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
enum tc_setup_type type, void *type_data, bool err_stop,
u32 *old_flags, unsigned int *old_in_hw_count,
u32 *new_flags, unsigned int *new_in_hw_count,
bool rtnl_held)
{
+ bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
int ok_count;
+retry:
+ if (take_rtnl)
+ rtnl_lock();
down_read(&block->cb_lock);
+ /* Need to obtain rtnl lock if block is bound to devs that require it.
+ * In block bind code cb_lock is obtained while holding rtnl, so we must
+ * obtain the locks in the same order here.
+ */
+ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
+ up_read(&block->cb_lock);
+ take_rtnl = true;
+ goto retry;
+ }
+
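+ /* From here the flow mirrors tc_setup_cb_add(), except that replace is
+ * destructive: the old filter's offload counters are reset and drivers
+ * see a hw_del before the new config is pushed.
+ */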
/* Make sure all netdevs sharing this block are offload-capable. */
if (block->nooffloaddevcnt && err_stop) {
ok_count = -EOPNOTSUPP;
goto err_unlock;
}

tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
if (tp->ops->hw_del)
tp->ops->hw_del(tp, type_data);

ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
if (ok_count < 0)
goto err_unlock;

if (tp->ops->hw_add)
tp->ops->hw_add(tp, type_data);
if (ok_count > 0)
tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
new_flags, ok_count, true);
err_unlock:
up_read(&block->cb_lock);
+ if (take_rtnl)
+ rtnl_unlock();
return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
enum tc_setup_type type, void *type_data, bool err_stop,
u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
+ bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
int ok_count;
+retry:
+ if (take_rtnl)
+ rtnl_lock();
down_read(&block->cb_lock);
+ /* Need to obtain rtnl lock if block is bound to devs that require it.
+ * In block bind code cb_lock is obtained while holding rtnl, so we must
+ * obtain the locks in the same order here.
+ */
+ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
+ up_read(&block->cb_lock);
+ take_rtnl = true;
+ goto retry;
+ }
+
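+ /* Counters are reset and drivers notified unconditionally: the filter
+ * is going away even if a callback returned an error.
+ */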
ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
if (tp->ops->hw_del)
tp->ops->hw_del(tp, type_data);
up_read(&block->cb_lock);
+ if (take_rtnl)
+ rtnl_unlock();
return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);
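
For context, a minimal sketch (not part of the patch) of how a classifier's
offload path might call the new helper; my_cls_hw_replace() and its parameters
are hypothetical, loosely modeled on the cls_flower calling pattern:

/* Hypothetical caller, illustration only: rtnl_held is plumbed through so
 * tc_setup_cb_add() can decide whether it must take rtnl itself.
 */
static int my_cls_hw_replace(struct tcf_block *block, struct tcf_proto *tp,
			     void *cls_offload, bool skip_sw, u32 *flags,
			     unsigned int *in_hw_count, bool rtnl_held)
{
	int err;

	/* err_stop == skip_sw: when software fallback is disallowed, a
	 * device that cannot offload must fail the whole operation.
	 */
	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, cls_offload,
			      skip_sw, flags, in_hw_count, rtnl_held);
	if (err < 0)
		return err;

	/* tc_cls_offload_cnt_update() sets TCA_CLS_FLAGS_IN_HW when at
	 * least one callback accepted the filter.
	 */
	if (skip_sw && !(*flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}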