M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
M: Stephen Hemminger <sthemmin@microsoft.com>
+M: Sasha Levin <sashal@kernel.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
L: devel@linuxdriverproject.org
-S: Maintained
+S: Supported
-F: Documentation/networking/netvsc.txt
+F: Documentation/networking/device_drivers/microsoft/netvsc.txt
F: arch/x86/include/asm/mshyperv.h
F: arch/x86/include/asm/trace/hyperv.h
F: arch/x86/include/asm/hyperv-tlfs.h
csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
port_id * step;
csk->sndbuf = newsk->sk_sndbuf;
- csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
- cxgb4_port_viid(ndev));
+ csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
- tp->rcv_wnd = select_rcv_wnd(csk);
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- WSCALE_OK(tp),
+ sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling,
tp->window_clamp);
neigh_release(n);
inet_inherit_port(&tcp_hashinfo, lsk, newsk);
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
-
- eth_hw_addr_random(netdev);
-
- netdev->min_mtu = ETH_MIN_MTU;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}
- static int mlx5e_rep_get_default_num_channels(struct mlx5_eswitch_rep *rep,
- struct net_device *netdev)
- {
- if (rep->vport == FDB_UPLINK_VPORT)
- return mlx5e_get_netdev_max_channels(netdev);
- else
- return 1;
- }
-
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile,
if (err)
return err;
- priv->channels.params.num_channels =
- mlx5e_rep_get_default_num_channels(rpriv->rep, netdev);
+ priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
- mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
+ mlx5e_build_rep_params(netdev);
mlx5e_build_rep_netdev(netdev);
mlx5e_timestamp_init(priv);
struct mlx5_flow_handle *rule;
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- slow_attr->split_count = 0,
- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN,
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr->mirror_count = 0;
++ slow_attr->split_count = 0;
+ slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
struct mlx5_esw_flow_attr *slow_attr)
{
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->mirror_count = 0;
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
++ slow_attr->split_count = 0;
+ slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
int err = 0, encap_err = 0;
+ int out_index;
- /* if prios are not supported, keep the old behaviour of using same prio
- * for all offloaded rules.
- */
- if (!mlx5_eswitch_prios_supported(esw))
- attr->prio = 1;
+ if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+ NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
+ return -EOPNOTSUPP;
+ }
if (attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
- if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
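+ /* goto chain is implemented as a forward action, so a rule cannot
+ * both mirror to a destination and jump to another chain.
+ */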
+ if (attr->dest_chain) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
+ }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
- MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
+ MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
list_add(&vxlan->next, &vn->vxlan_list);
return 0;
+
errout:
+ /* unregister_netdevice() destroys the default FDB entry with deletion
+ * notification. But the addition notification was not sent yet, so
+ * destroy the entry by hand here.
+ */
if (f)
- vxlan_fdb_destroy(vxlan, f, false);
+ vxlan_fdb_destroy(vxlan, f, false, false);
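+ /* Roll back the earlier register_netdevice() if it had succeeded. */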
+ if (unregister)
+ unregister_netdevice(dev);
return err;
}
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
+ unsigned long old_age_interval;
struct vxlan_rdst old_dst;
struct vxlan_config conf;
- struct vxlan_fdb *f = NULL;
int err;
err = vxlan_nl2conf(tb, data,
vxlan->cfg.dst_port,
old_dst.remote_vni,
old_dst.remote_vni,
- old_dst.remote_ifindex, 0);
+ old_dst.remote_ifindex,
+ true);
if (!vxlan_addr_any(&dst->remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ err = vxlan_fdb_update(vxlan, all_zeros_mac,
&dst->remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_APPEND | NLM_F_CREATE,
vxlan->cfg.dst_port,
dst->remote_vni,
dst->remote_vni,
dst->remote_ifindex,
- NTF_SELF, &f);
- NTF_SELF);
++ NTF_SELF, false);
if (err) {
spin_unlock_bh(&vxlan->hash_lock);
return err;
return -EINVAL;
}
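+ /* Strict dump requests may only carry NTF_PROXY in ndm_flags. */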
+ if (ndm->ndm_flags & ~NTF_PROXY) {
+ NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
+ return -EINVAL;
+ }
+
err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- NULL, extack);
+ nda_policy, extack);
} else {
err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- NULL, extack);
+ nda_policy, extack);
}
if (err < 0)
return err;
__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+#ifdef CONFIG_NET_SWITCHDEV
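+ /* Packets already forwarded by an L3 offload device must not be
+ * forwarded again in software; consume them here.
+ */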
+ if (skb->offload_l3_fwd_mark) {
+ consume_skb(skb);
+ return 0;
+ }
+#endif
+
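+ /* Clear any receive timestamp so it is not mistaken for a transmit
+ * (departure) time further down the stack.
+ */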
+ skb->tstamp = 0;
return dst_output(net, sk, skb);
}
skb->mark = xfrm_smark_get(skb->mark, x);
- skb->sp->xvec[skb->sp->len++] = x;
+ sp->xvec[sp->len++] = x;
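+ /* skb_dst_force() drops the dst if it cannot take a reference;
+ * treat a packet left without a dst as an input error.
+ */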
+ skb_dst_force(skb);
+ if (!skb_dst(skb)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+ goto drop;
+ }
+
lock:
spin_lock(&x->lock);
.result = ACCEPT,
},
{
+ "check wire_len is not readable by sockets",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check wire_len is readable by tc classifier",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "check wire_len is not writable by tc classifier",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+ },
+ "calls: cross frame pruning",
+ .insns = {
+ /* r8 = !!random();
+ * call pruner()
+ * if (r8)
+ * do something bad;
+ */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
CFLAGS += -I../../../../usr/include/
-TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
+TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh \
+ rtnetlink.sh xfrm_policy.sh
TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
+TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
+TEST_PROGS += test_vxlan_fdb_changelink.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket
-TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
-TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd
+TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
+TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls