Two cases of overlapping changes, nothing fancy.
Signed-off-by: David S. Miller <davem@davemloft.net>
* If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
* or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
* based on @state->advertising and/or @state->speed and update
- * @state->interface accordingly.
+ * @state->interface accordingly. See phylink_helper_basex_speed().
+ *
+ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the
+ * MAC driver to return all supported link modes.
+ *
+ * If the @state->interface mode is not supported, then the @supported
+ * mask must be cleared.
*/
-void validate(struct net_device *ndev, unsigned long *supported,
+void validate(struct phylink_config *config, unsigned long *supported,
struct phylink_link_state *state);
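For illustration, a MAC driver's validate() implementation following the contract described above could look roughly like the sketch below. The foo_ prefix and the particular interface/link modes are made up for the example; phylink_set(), phylink_set_port_modes(), linkmode_zero(), linkmode_and() and phylink_helper_basex_speed() are the usual phylink/linkmode helpers.

#include <linux/phylink.h>
#include <linux/linkmode.h>

static void foo_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Unsupported interface mode: clear @supported and give up. */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    state->interface != PHY_INTERFACE_MODE_1000BASEX &&
	    state->interface != PHY_INTERFACE_MODE_2500BASEX) {
		linkmode_zero(supported);
		return;
	}

	/* Modes this MAC can do in any accepted interface mode; for
	 * %PHY_INTERFACE_MODE_NA this doubles as the full capability set.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 1000baseT_Full);
	phylink_set(mask, 1000baseX_Full);
	phylink_set(mask, 2500baseX_Full);
	phylink_set_port_modes(mask);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* Pick 1000BASE-X vs 2500BASE-X from the advertising mask. */
	phylink_helper_basex_speed(state);
}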
/**
};
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
- (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
- TLS_DRIVER_STATE_SIZE)
+ (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
+ void tls_ctx_free(struct tls_context *ctx);
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
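For context, the scratch area whose size feeds the macro above is what an offload driver gets back from tls_driver_ctx(); a minimal usage sketch, with struct foo_tls_rx and hw_flow_id invented for the example, might be:

#include <net/tls.h>

/* Illustrative per-connection RX state; real drivers define their own
 * layout, it just has to fit in the driver scratch area.
 */
struct foo_tls_rx {
	u32 hw_flow_id;
};

static struct foo_tls_rx *foo_tls_rx_state(struct sock *sk)
{
	BUILD_BUG_ON(sizeof(struct foo_tls_rx) > TLS_DRIVER_STATE_SIZE_RX);
	return tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
}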
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
- struct xsk_queue *tx ____cacheline_aligned_in_smp;
- struct list_head list;
bool zc;
+ enum {
+ XSK_READY = 0,
+ XSK_BOUND,
+ XSK_UNBOUND,
+ } state;
/* Protects multiple processes in the control path */
struct mutex mutex;
+ struct xsk_queue *tx ____cacheline_aligned_in_smp;
+ struct list_head list;
/* Mutual exclusion of NAPI TX thread and sendmsg error paths
* in the SKB destructor callback.
*/
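The READY/BOUND/UNBOUND enum added to struct xdp_sock gives the socket an explicit lifecycle. A simplified sketch of the intended use (xsk_check_ready() is a hypothetical helper; the real checks sit in the bind and unbind paths, under xs->mutex):

/* Lifecycle: XSK_READY after socket creation, XSK_BOUND after a
 * successful bind(), XSK_UNBOUND once the bound device goes away.
 * An unbound socket keeps its rings allocated but cannot be reused.
 */
static int xsk_check_ready(struct xdp_sock *xs)
{
	int err = 0;

	mutex_lock(&xs->mutex);		/* control-path serialization */
	if (xs->state != XSK_READY)
		err = -EBUSY;		/* already bound, or device removed */
	mutex_unlock(&xs->mutex);

	return err;
}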
ifa->ifa_flags &= ~IFA_F_SECONDARY;
last_primary = &in_dev->ifa_list;
- for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
- ifap = &ifa1->ifa_next) {
+ /* Don't set IPv6 only flags to IPv4 addresses */
+ ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
+
+ ifap = &in_dev->ifa_list;
+ ifa1 = rtnl_dereference(*ifap);
+
+ while (ifa1) {
if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
ifa->ifa_scope <= ifa1->ifa_scope)
last_primary = &ifa1->ifa_next;
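The hunk above only shows the head of the rewritten scan; the rest of the loop is assumed to advance with the standard RTNL-protected walk of the now __rcu-annotated list, i.e. rtnl_dereference() at every step. A self-contained sketch of that pattern (foo_count_ifa() is purely illustrative):

#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>

/* Walk in_dev->ifa_list under RTNL, dereferencing each __rcu link with
 * rtnl_dereference(), the same pattern the rewritten scan relies on.
 */
static int foo_count_ifa(struct in_device *in_dev)
{
	struct in_ifaddr *ifa;
	int n = 0;

	ASSERT_RTNL();
	ifa = rtnl_dereference(in_dev->ifa_list);
	while (ifa) {
		n++;
		ifa = rtnl_dereference(ifa->ifa_next);
	}

	return n;
}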
WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
- if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+ if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
return 0;
- if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
+ if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
return 0;
inside = (void *)skb->data + hdrlen;
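A note on the first change above: skb_ensure_writable() returns 0 on success and a negative errno on failure, whereas the old skb_make_writable() returned true on success, which is why the sense of the test flips. A tiny illustrative wrapper (foo_header_writable() is hypothetical) that captures the calling convention:

#include <linux/skbuff.h>

/* skb_ensure_writable() returns 0 on success, negative errno on failure;
 * convert that into the boolean the old skb_make_writable() used to give.
 */
static bool foo_header_writable(struct sk_buff *skb, unsigned int len)
{
	return skb_ensure_writable(skb, len) == 0;
}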
sock_set_flag(sk, SOCK_RCU_FREE);
xs = xdp_sk(sk);
+ xs->state = XSK_READY;
mutex_init(&xs->mutex);
+ spin_lock_init(&xs->rx_lock);
spin_lock_init(&xs->tx_completion_lock);
mutex_lock(&net->xdp.lock);
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_ETF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
+ CONFIG_KALLSYMS=y