Maximum number of packets, queued on the INPUT side, when the interface
receives packets faster than the kernel can process them.
+netdev_tstamp_prequeue
+----------------------
+
+If set to 0, RX packet timestamps can be sampled after RPS processing, when
+the target CPU processes packets. This can add some delay to the timestamps,
+but allows the load to be spread over several CPUs.
+
+If set to 1 (default), timestamps are sampled as soon as possible, before
+queueing.
+
optmem_max
----------
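
(Not part of the patch itself: once applied, the new knob is reachable as
/proc/sys/net/core/netdev_tstamp_prequeue, or via
`sysctl net.core.netdev_tstamp_prequeue`. A minimal userspace sketch for
reading and flipping it could look like the following; the path is implied by
the sysctl table hunk further down, and the helper names are illustrative
only.)

/* Sketch: inspect and set net.core.netdev_tstamp_prequeue from userspace.
 * Writing requires root; error handling is deliberately minimal.
 */
#include <stdio.h>

#define TSTAMP_PREQUEUE "/proc/sys/net/core/netdev_tstamp_prequeue"

static int read_knob(void)
{
	FILE *f = fopen(TSTAMP_PREQUEUE, "r");
	int val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

static int write_knob(int val)
{
	FILE *f = fopen(TSTAMP_PREQUEUE, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	printf("netdev_tstamp_prequeue = %d\n", read_knob());
	/* Switch to "timestamp after RPS" mode (equivalent to
	 * sysctl -w net.core.netdev_tstamp_prequeue=0).
	 */
	if (write_knob(0) != 0)
		perror("write " TSTAMP_PREQUEUE);
	return 0;
}
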
}
EXPORT_SYMBOL(net_disable_timestamp);
-static inline void net_timestamp(struct sk_buff *skb)
+static inline void net_timestamp_set(struct sk_buff *skb)
{
if (atomic_read(&netstamp_needed))
__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
+static inline void net_timestamp_check(struct sk_buff *skb)
+{
+ if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
+ __net_timestamp(skb);
+}
+
/**
* dev_forward_skb - loopback an skb to another netif
*
#ifdef CONFIG_NET_CLS_ACT
if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
- net_timestamp(skb);
+ net_timestamp_set(skb);
#else
- net_timestamp(skb);
+ net_timestamp_set(skb);
#endif
rcu_read_lock();
=======================================================================*/
int netdev_max_backlog __read_mostly = 1000;
+int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64; /* old backlog weight */
if (netpoll_rx(skb))
return NET_RX_DROP;
- if (!skb->tstamp.tv64)
- net_timestamp(skb);
+ if (netdev_tstamp_prequeue)
+ net_timestamp_check(skb);
#ifdef CONFIG_RPS
{
int ret = NET_RX_DROP;
__be16 type;
- if (!skb->tstamp.tv64)
- net_timestamp(skb);
+ if (!netdev_tstamp_prequeue)
+ net_timestamp_check(skb);
if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
return NET_RX_SUCCESS;
*/
int netif_receive_skb(struct sk_buff *skb)
{
+ if (netdev_tstamp_prequeue)
+ net_timestamp_check(skb);
+
#ifdef CONFIG_RPS
-	struct rps_dev_flow voidflow, *rflow = &voidflow;
-	int cpu, ret;
-
-	rcu_read_lock();
-
-	cpu = get_rps_cpu(skb->dev, skb, &rflow);
-
-	if (cpu >= 0) {
-		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-		rcu_read_unlock();
-	} else {
-		rcu_read_unlock();
-		ret = __netif_receive_skb(skb);
-	}
-
-	return ret;
+	{
+		struct rps_dev_flow voidflow, *rflow = &voidflow;
+		int cpu, ret;
+
+		rcu_read_lock();
+
+		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+
+		if (cpu >= 0) {
+			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+			rcu_read_unlock();
+		} else {
+			rcu_read_unlock();
+			ret = __netif_receive_skb(skb);
+		}
+		return ret;
+	}
#else
return __netif_receive_skb(skb);
#endif
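
(For context, not part of the patch: the RX timestamps whose sampling point is
controlled by netdev_tstamp_prequeue are the ones applications already see
through the standard SO_TIMESTAMP / SCM_TIMESTAMP interface. A rough sketch of
reading them on a UDP socket; the port number is arbitrary and error handling
is trimmed.)

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int on = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(9999),	/* arbitrary test port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char data[2048];
	char cbuf[CMSG_SPACE(sizeof(struct timeval))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on)) < 0) {
		perror("setup");
		return 1;
	}
	if (recvmsg(fd, &msg, 0) < 0) {
		perror("recvmsg");
		return 1;
	}
	/* Walk the ancillary data for the kernel-supplied RX timestamp. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("rx tstamp: %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	}
	close(fd);
	return 0;
}
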
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "netdev_tstamp_prequeue",
+ .data = &netdev_tstamp_prequeue,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{
.procname = "message_cost",
.data = &net_ratelimit_state.interval,