* @tx_ring: Tx descriptor ring
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
- u32 *td_cmd, u32 *td_offset,
- struct i40e_ring *tx_ring,
- u32 *cd_tunneling)
+static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
{
union {
struct iphdr *v4;
__be16 frag_off;
u8 l4_proto = 0;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
break;
default:
- return;
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
+ return -1;
+
+ skb_checksum_help(skb);
+ return 0;
}
/* compute tunnel header size */
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
default:
- break;
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
+ return -1;
+ skb_checksum_help(skb);
+ return 0;
}
*td_cmd |= cmd;
*td_offset |= offset;
+
+ return 1;
}
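
For reference, a minimal sketch of the return-value contract this hunk introduces: a negative value means the offload request cannot be honored and the frame should be dropped (TSO needs a valid hardware checksum), zero means no hardware checksum work is required (either none was requested, or the packet fell back to skb_checksum_help()), and a positive value means the descriptor fields were populated. The my_* names and MY_TXD_CMD_CSUM below are hypothetical stand-ins, not i40e symbols, and skb_is_gso() stands in for the driver's I40E_TX_FLAGS_TSO check; the real helper parses the network/transport headers as shown in the hunk above.

#include <linux/bits.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical command bit, for illustration only. */
#define MY_TXD_CMD_CSUM	BIT(0)

/* Stand-in for the driver's header parsing; the real code walks the
 * IP/L4 headers to select the L3/L4 offload bits.
 */
static bool my_l4_is_offloadable(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) ||
	       skb->protocol == htons(ETH_P_IPV6);
}

static int my_tx_enable_csum(struct sk_buff *skb, u32 *td_cmd)
{
	/* No checksum requested: nothing to program, nothing to drop. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!my_l4_is_offloadable(skb)) {
		/* TSO frames need the hardware checksum; signal a drop. */
		if (skb_is_gso(skb))
			return -1;
		/* Otherwise checksum in software and keep the frame. */
		skb_checksum_help(skb);
		return 0;
	}

	*td_cmd |= MY_TXD_CMD_CSUM;
	return 1;	/* hardware offload configured */
}
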
/**
td_cmd |= I40E_TX_DESC_CMD_ICRC;
/* Always offload the checksum, since it's in the data descriptor */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- tx_flags |= I40E_TX_FLAGS_CSUM;
-
- i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
- tx_ring, &cd_tunneling);
- }
+ tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ if (tso < 0)
+ goto out_drop;
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
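
A sketch of how a transmit routine typically consumes that contract follows; the out_drop label is not visible in the hunk above, but in this style of driver it frees the skb and reports NETDEV_TX_OK so the stack does not requeue the frame. my_xmit_frame() uses the hypothetical my_tx_enable_csum() from the previous sketch.

static netdev_tx_t my_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	u32 td_cmd = 0;
	int ret;

	/* Negative return: the offload request cannot be honored
	 * (e.g. TSO with an unsupported L4 protocol), so drop.
	 */
	ret = my_tx_enable_csum(skb, &td_cmd);
	if (ret < 0)
		goto out_drop;

	/* ... build and ring the Tx descriptors using td_cmd ... */
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
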
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM BIT(0)
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
* @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
- u32 *td_cmd, u32 *td_offset,
- struct i40e_ring *tx_ring,
- u32 *cd_tunneling)
+static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
{
union {
struct iphdr *v4;
__be16 frag_off;
u8 l4_proto = 0;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
- return;
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
+ return -1;
+
+ skb_checksum_help(skb);
+ return 0;
}
/* compute tunnel header size */
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
default:
- break;
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
+ return -1;
+ skb_checksum_help(skb);
+ return 0;
}
*td_cmd |= cmd;
*td_offset |= offset;
+
+ return 1;
}
/**
td_cmd |= I40E_TX_DESC_CMD_ICRC;
/* Always offload the checksum, since it's in the data descriptor */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- tx_flags |= I40E_TX_FLAGS_CSUM;
-
- i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
- tx_ring, &cd_tunneling);
- }
+ tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ if (tso < 0)
+ goto out_drop;
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM BIT(0)
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)