struct list_head filters;
struct rcu_work rwork;
struct list_head list;
+ refcount_t refcnt;
};
struct fl_flow_tmplt {
static void fl_mask_free(struct fl_flow_mask *mask)
{
+ WARN_ON(!list_empty(&mask->filters));
rhashtable_destroy(&mask->ht);
kfree(mask);
}
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
bool async)
{
- if (!list_empty(&mask->filters))
+ if (!refcount_dec_and_test(&mask->refcnt))
return false;
rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
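With the counter in place, fl_mask_put() follows the usual last-reference-frees pattern: only the caller that drops the count to zero removes the mask from the hashtable and frees it (synchronously or via the RCU work item, depending on the async argument). A minimal sketch of the same pattern outside the flower code, using a hypothetical struct item and item_put(), not the actual cls_flower types:

#include <linux/err.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical object used by the sketches below; not the flower code. */
struct item {
	u32 key;
	struct rhash_head node;
	refcount_t refcnt;
	struct rcu_head rcu;
};

static const struct rhashtable_params item_ht_params = {
	.key_offset = offsetof(struct item, key),
	.key_len = sizeof(u32),
	.head_offset = offsetof(struct item, node),
};

/* Put side: only the caller that drops the last reference unpublishes
 * the item and frees it after an RCU grace period.
 */
static bool item_put(struct rhashtable *ht, struct item *it)
{
	if (!refcount_dec_and_test(&it->refcnt))
		return false;

	rhashtable_remove_fast(ht, &it->node, item_ht_params);
	kfree_rcu(it, rcu);
	return true;
}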
INIT_LIST_HEAD_RCU(&newmask->filters);
+ refcount_set(&newmask->refcnt, 1);
err = rhashtable_insert_fast(&head->ht, &newmask->ht_node,
mask_ht_params);
if (err)
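In fl_create_new_mask() the count is set to one before the mask is inserted into the hashtable, so a concurrent lookup can never find a published mask whose count is still zero. The create side of the hypothetical struct item above would look like this (again a sketch, reusing item_ht_params from the previous block):

/* Create side: take the first reference before the item is published. */
static struct item *item_create(struct rhashtable *ht, u32 key)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);
	int err;

	if (!it)
		return ERR_PTR(-ENOMEM);

	it->key = key;
	refcount_set(&it->refcnt, 1);

	err = rhashtable_insert_fast(ht, &it->node, item_ht_params);
	if (err) {
		kfree(it);
		return ERR_PTR(err);
	}
	return it;
}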
static int fl_check_assign_mask(struct cls_fl_head *head,
	struct cls_fl_filter *fnew,
	struct cls_fl_filter *fold,
	struct fl_flow_mask *mask)
{
struct fl_flow_mask *newmask;
+ int ret = 0;
+ rcu_read_lock();
fnew->mask = rhashtable_lookup_fast(&head->ht, mask, mask_ht_params);
if (!fnew->mask) {
+ rcu_read_unlock();
+
if (fold)
return -EINVAL;
return PTR_ERR(newmask);
fnew->mask = newmask;
+ return 0;
} else if (fold && fold->mask != fnew->mask) {
- return -EINVAL;
+ ret = -EINVAL;
+ } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
+ /* Mask was deleted concurrently, try again */
+ ret = -EAGAIN;
}
-
- return 0;
+ rcu_read_unlock();
+ return ret;
}
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
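The lookup side of fl_check_assign_mask() pairs rcu_read_lock() with refcount_inc_not_zero(): a mask found in the hashtable may already be in the middle of being destroyed, and a failed increment is reported as -EAGAIN so the caller can retry, as the comment in the patch notes. The equivalent get-or-retry helper for the hypothetical struct item:

/* Lookup side: return a referenced item, or NULL if it was being freed
 * concurrently (the caller should retry).
 */
static struct item *item_lookup_get(struct rhashtable *ht, u32 key)
{
	struct item *it;

	rcu_read_lock();
	it = rhashtable_lookup_fast(ht, &key, item_ht_params);
	if (it && !refcount_inc_not_zero(&it->refcnt))
		it = NULL;	/* deleted concurrently */
	rcu_read_unlock();

	return it;
}

In the flower code the -EAGAIN is simply propagated; the tc filter insert path is expected to replay the request on -EAGAIN, and that part is not shown in this excerpt.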
list_replace_rcu(&fold->list, &fnew->list);
fold->deleted = true;
+ fl_mask_put(head, fold->mask, true);
if (!tc_skip_hw(fold->flags))
fl_hw_destroy_filter(tp, fold, NULL);
tcf_unbind_filter(tp, &fold->res);
if (!tc_skip_hw(fnew->flags))
fl_hw_destroy_filter(tp, fnew, NULL);
errout_mask:
- fl_mask_put(head, fnew->mask, false);
+ fl_mask_put(head, fnew->mask, true);
errout:
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
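Both the replace path (fl_mask_put(head, fold->mask, true) above) and the error path (fl_mask_put(head, fnew->mask, true)) balance a reference taken when the mask was looked up or created. In terms of the hypothetical helpers sketched earlier, a caller pairs them like this:

/* Usage sketch: each reference taken by item_lookup_get() or item_create()
 * is balanced by exactly one item_put().
 */
static int item_use(struct rhashtable *ht, u32 key)
{
	struct item *it = item_lookup_get(ht, key);

	if (!it)
		return -EAGAIN;	/* freed concurrently, caller retries */

	/* ... work with the item ... */

	item_put(ht, it);
	return 0;
}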