net: sched: red: don't reset the backlog on every stat dump
author: Jakub Kicinski <jakub.kicinski@netronome.com>
Mon, 15 Jan 2018 04:01:26 +0000 (20:01 -0800)
committer: David S. Miller <davem@davemloft.net>
Wed, 17 Jan 2018 19:29:32 +0000 (14:29 -0500)
Commit 0dfb33a0d7e2 ("sch_red: report backlog information") copied
child's backlog into RED's backlog.  Back then RED did not maintain
its own backlog counts.  This has changed after commit 2ccccf5fb43f
("net_sched: update hierarchical backlog too") and commit d7f4f332f082
("sch_red: update backlog as well").  Copying is no longer necessary.

Tested:

$ tc -s qdisc show dev veth0
qdisc red 1: root refcnt 2 limit 400000b min 30000b max 30000b ecn
 Sent 20942 bytes 221 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 1260b 14p requeues 14
  marked 0 early 0 pdrop 0 other 0
qdisc tbf 2: parent 1: rate 1Kbit burst 15000b lat 3585.0s
 Sent 20942 bytes 221 pkt (dropped 0, overlimits 138 requeues 0)
 backlog 1260b 14p requeues 14

Recently RED offload was added.  We need to make sure drivers don't
depend on resetting the stats.  This means backlog should be treated
like any other statistic:

  total_stat = new_hw_stat - prev_hw_stat;

Adjust mlxsw.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Nogah Frankel <nogahf@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
include/net/pkt_cls.h
net/sched/sch_red.c

index fee9da81bce6b9aebba4abac22f4aa2d0864525e..0b76704590512a75c20152f2a5ee8e62470d4e49 100644 (file)
@@ -247,6 +247,8 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 
        stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
        stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+       stats_base->backlog = 0;
 }
 
 static int
@@ -306,6 +308,19 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
                                                 max, prob, p->is_ecn);
 }
 
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+                            void *params)
+{
+       struct tc_red_qopt_offload_params *p = params;
+       u64 backlog;
+
+       backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+                                      mlxsw_sp_qdisc->stats_base.backlog);
+       p->qstats->backlog -= backlog;
+}
+
 static int
 mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -338,7 +353,7 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                             struct tc_qopt_offload_stats *stats_ptr)
 {
-       u64 tx_bytes, tx_packets, overlimits, drops;
+       u64 tx_bytes, tx_packets, overlimits, drops, backlog;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
@@ -354,14 +369,18 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                     stats_base->overlimits;
        drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
                stats_base->drops;
+       backlog = xstats->backlog[tclass_num];
 
        _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
        stats_ptr->qstats->overlimits += overlimits;
        stats_ptr->qstats->drops += drops;
        stats_ptr->qstats->backlog +=
-                       mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-                                            xstats->backlog[tclass_num]);
+                               mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+                                                    backlog) -
+                               mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+                                                    stats_base->backlog);
 
+       stats_base->backlog = backlog;
        stats_base->drops +=  drops;
        stats_base->overlimits += overlimits;
        stats_base->tx_bytes += tx_bytes;
@@ -375,6 +394,7 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
        .type = MLXSW_SP_QDISC_RED,
        .check_params = mlxsw_sp_qdisc_red_check_params,
        .replace = mlxsw_sp_qdisc_red_replace,
+       .unoffload = mlxsw_sp_qdisc_red_unoffload,
        .destroy = mlxsw_sp_qdisc_red_destroy,
        .get_stats = mlxsw_sp_qdisc_get_red_stats,
        .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
index 9c341f003091edfcad5a039b27a9a3ad5b9db64a..cc23c041a6d739bd4966456b9011521d930dfa4a 100644 (file)
@@ -748,6 +748,7 @@ struct tc_red_qopt_offload_params {
        u32 max;
        u32 probability;
        bool is_ecn;
+       struct gnet_stats_queue *qstats;
 };
 
 struct tc_red_qopt_offload {
index 0af1c1254e0b9ced5e2dcdd5abb90754284a2056..16644b3d236271533b6e96a62705bc1903ab1d8a 100644 (file)
@@ -167,6 +167,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
                opt.set.max = q->parms.qth_max >> q->parms.Wlog;
                opt.set.probability = q->parms.max_P;
                opt.set.is_ecn = red_use_ecn(q);
+               opt.set.qstats = &sch->qstats;
        } else {
                opt.command = TC_RED_DESTROY;
        }
@@ -322,7 +323,6 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
        };
        int err;
 
-       sch->qstats.backlog = q->qdisc->qstats.backlog;
        err = red_dump_offload_stats(sch, &opt);
        if (err)
                goto nla_put_failure;