net: sched: cls_bpf: implement offload tcf_proto_op
author: John Hurley <john.hurley@netronome.com>
Mon, 25 Jun 2018 21:30:09 +0000 (14:30 -0700)
committer: David S. Miller <davem@davemloft.net>
Tue, 26 Jun 2018 14:21:33 +0000 (23:21 +0900)
Add the offload tcf_proto_op in cls_bpf to generate an offload message for
each bpf prog in the given tcf_proto. Call the specified callback with
this new offload message. The function only returns an error if the
callback rejects adding a 'hardware only' prog.

A prog contains a flag to indicate if it is in hardware or not. To
ensure the offload function properly maintains this flag, keep a reference
counter for the number of instances of the prog that are in hardware. Only
update the flag when this counter changes from or to 0.

Signed-off-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/cls_bpf.c

index 1aa7f6511065a1d1da3eda4a409c66c5ce0bc773..66e0ac9811f9e3811a9b3b19bb453022c7004879 100644 (file)
@@ -43,6 +43,7 @@ struct cls_bpf_prog {
        struct tcf_result res;
        bool exts_integrated;
        u32 gen_flags;
+       unsigned int in_hw_count;
        struct tcf_exts exts;
        u32 handle;
        u16 bpf_num_ops;
@@ -174,6 +175,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                        cls_bpf_offload_cmd(tp, oldprog, prog, extack);
                        return err;
                } else if (err > 0) {
+                       prog->in_hw_count = err;
                        tcf_block_offload_inc(block, &prog->gen_flags);
                }
        }
@@ -652,6 +654,42 @@ skip:
        }
 }
 
+/* Offload tcf_proto_op for cls_bpf: replay an offload message for every
+ * bpf prog in this tcf_proto to the supplied callback @cb.  @add selects
+ * whether the prog is being installed in (true) or removed from (false)
+ * hardware.  Returns an error only if @cb rejects adding a 'hardware
+ * only' (skip_sw) prog; all other callback failures are skipped.
+ */
+static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+                            void *cb_priv, struct netlink_ext_ack *extack)
+{
+       struct cls_bpf_head *head = rtnl_dereference(tp->root);
+       struct tcf_block *block = tp->chain->block;
+       struct tc_cls_bpf_offload cls_bpf = {};
+       struct cls_bpf_prog *prog;
+       int err;
+
+       list_for_each_entry(prog, &head->plist, link) {
+               /* Software-only progs were never offloaded; nothing to replay. */
+               if (tc_skip_hw(prog->gen_flags))
+                       continue;
+
+               tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
+                                          extack);
+               cls_bpf.command = TC_CLSBPF_OFFLOAD;
+               cls_bpf.exts = &prog->exts;
+               /* On add the prog goes in ->prog (install); on remove it goes
+                * in ->oldprog so the driver knows what to tear down.
+                */
+               cls_bpf.prog = add ? prog->filter : NULL;
+               cls_bpf.oldprog = add ? NULL : prog->filter;
+               cls_bpf.name = prog->bpf_name;
+               cls_bpf.exts_integrated = prog->exts_integrated;
+
+               err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv);
+               if (err) {
+                       /* A skip_sw prog has no software fallback, so failing
+                        * to add it to hardware is fatal; otherwise best-effort.
+                        */
+                       if (add && tc_skip_sw(prog->gen_flags))
+                               return err;
+                       continue;
+               }
+
+               /* Maintain the per-prog hardware instance counter; per the
+                * commit message, the in-hw flag in gen_flags is only updated
+                * when this counter changes from or to 0.
+                */
+               tc_cls_offload_cnt_update(block, &prog->in_hw_count,
+                                         &prog->gen_flags, add);
+       }
+
+       return 0;
+}
+
 static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .owner          =       THIS_MODULE,
@@ -662,6 +700,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .change         =       cls_bpf_change,
        .delete         =       cls_bpf_delete,
        .walk           =       cls_bpf_walk,
+       .reoffload      =       cls_bpf_reoffload,
        .dump           =       cls_bpf_dump,
        .bind_class     =       cls_bpf_bind_class,
 };