return hdr_len + skb_gso_transport_seglen(skb);
}
+/* Local Checksum Offload.
+ * Compute outer checksum based on the assumption that the
+ * inner checksum will be offloaded later.
+ * Fill in outer checksum adjustment (e.g. with sum of outer
+ * pseudo-header) before calling.
+ * Also ensure that inner checksum is in linear data area.
+ */
+static inline __wsum lco_csum(struct sk_buff *skb)
+{
+ char *inner_csum_field;
+ __wsum csum;
+
+ /* Start with complement of inner checksum adjustment */
+ inner_csum_field = skb->data + skb_checksum_start_offset(skb) +
+ skb->csum_offset;
+ csum = ~csum_unfold(*(__force __sum16 *)inner_csum_field);
+ /* Add in checksum of our headers (incl. outer checksum
+ * adjustment filled in by caller)
+ */
+ csum = skb_checksum(skb, 0, skb_checksum_start_offset(skb), csum);
+ /* The result is the checksum from skb->data to the end of the
+ * packet: once the inner checksum is offloaded, the region from
+ * checksum_start onwards will sum to the complement of the inner
+ * adjustment, which we already folded in above.
+ */
+ return csum;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
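Not part of the patch: to see why lco_csum() yields the checksum of the whole packet, note that once the inner checksum is offloaded, the device fills the inner check field so that the region from checksum_start to the end of the packet sums to the complement of the inner adjustment, which lco_csum() has already folded in. The userspace sketch below demonstrates the arithmetic on a toy packet; csum_region() and all the constants are invented for illustration and are not kernel code.

#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum over a byte region (RFC 1071 style) */
static uint16_t csum_region(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += p[0] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	/* Toy packet: 8 bytes of outer headers, then an 8-byte inner
	 * segment whose checksum field is at inner offset 2.
	 */
	uint8_t pkt[16] = { 1, 2, 3, 4, 5, 6, 7, 8,
			    9, 10, 0, 0, 13, 14, 15, 16 };
	uint16_t adj = 0x1234;	/* stand-in for inner pseudo-header sum */

	/* Seed the inner check field with the adjustment, as a stack
	 * preparing CHECKSUM_PARTIAL would.
	 */
	pkt[10] = adj >> 8;
	pkt[11] = adj & 0xff;

	/* lco_csum() equivalent: outer headers plus ~adjustment */
	uint16_t lco = csum_region(pkt, 8, (uint16_t)~adj);

	/* Emulate the inner offload: the device stores the complement
	 * of the sum over the inner region (seed included).
	 */
	uint16_t inner = ~csum_region(pkt + 8, 8, 0);
	pkt[10] = inner >> 8;
	pkt[11] = inner & 0xff;

	/* The full packet now sums to exactly what lco predicted */
	printf("lco=%04x full=%04x\n", lco, csum_region(pkt, 16, 0));
	return 0;
}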
return skb;
}
- /* If packet is not gso and we are resolving any partial checksum,
+ /* If packet is not gso and we are not offloading inner checksum,
* clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
* on the outer header without confusing devices that implement
* NETIF_F_IP_CSUM with encapsulation.
*/
- if (csum_help)
- skb->encapsulation = 0;
-
if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
+ skb->encapsulation = 0;
err = skb_checksum_help(skb);
if (unlikely(err))
goto error;
- } else if (skb->ip_summed != CHECKSUM_PARTIAL)
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb->ip_summed = CHECKSUM_NONE;
+ skb->encapsulation = 0;
+ }
return skb;
error:
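Not part of the patch: the restructuring above changes when skb->encapsulation is cleared; it now survives only on the CHECKSUM_PARTIAL path where the inner checksum stays offloaded (the LCO case), and is cleared both when the checksum is resolved in software and when there is nothing to offload. A minimal userspace model of the branch logic, with stand-in types whose names and values are hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's skb fields; values don't match the kernel */
enum ip_summed { CHECKSUM_NONE, CHECKSUM_PARTIAL };

struct toy_skb {
	enum ip_summed ip_summed;
	bool encapsulation;
};

/* Mirrors the rewritten branches of the tunnel offload handler */
static void handle_offloads(struct toy_skb *skb, bool csum_help)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
		skb->encapsulation = false;
		/* kernel: skb_checksum_help() resolves it in software */
	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
		skb->ip_summed = CHECKSUM_NONE;
		skb->encapsulation = false;
	}
	/* CHECKSUM_PARTIAL && !csum_help: inner checksum remains
	 * offloaded, so encapsulation stays set.
	 */
}

int main(void)
{
	struct toy_skb skb = { CHECKSUM_PARTIAL, true };

	handle_offloads(&skb, false);
	printf("encapsulation=%d\n", skb.encapsulation);	/* 1: kept */
	return 0;
}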
{
struct udphdr *uh = udp_hdr(skb);
- if (nocheck)
+ if (nocheck) {
uh->check = 0;
- else if (skb_is_gso(skb))
+ } else if (skb_is_gso(skb)) {
uh->check = ~udp_v4_check(len, saddr, daddr, 0);
- else if (skb_dst(skb) && skb_dst(skb)->dev &&
- (skb_dst(skb)->dev->features &
- (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM))) {
-
- BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
-
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ uh->check = 0;
+ uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ } else if (skb_dst(skb) && skb_dst(skb)->dev &&
+ (skb_dst(skb)->dev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM))) {
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
} else {
__wsum csum;
- BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
-
uh->check = 0;
csum = skb_checksum(skb, 0, len, 0);
uh->check = udp_v4_check(len, saddr, daddr, csum);
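Not part of the patch: in the new CHECKSUM_PARTIAL branch, uh->check is zeroed first so that the outer check field itself does not contribute to the sum lco_csum() takes over the outer headers, and the result is then mangled if it comes out as zero: a transmitted UDP checksum of zero means "no checksum", and 0x0000 and 0xffff encode the same value in one's-complement arithmetic. A standalone illustration (finalise_udp_check() is a made-up name; CSUM_MANGLED_0 matches the kernel's 0xffff):

#include <stdint.h>
#include <stdio.h>

#define CSUM_MANGLED_0 0xffff

/* A computed UDP checksum of zero must be sent as all-ones */
static uint16_t finalise_udp_check(uint16_t computed)
{
	return computed == 0 ? CSUM_MANGLED_0 : computed;
}

int main(void)
{
	printf("%04x %04x\n",
	       finalise_udp_check(0x0000),	/* becomes ffff */
	       finalise_udp_check(0xbeef));	/* unchanged */
	return 0;
}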
uh->check = 0;
else if (skb_is_gso(skb))
uh->check = ~udp_v6_check(len, saddr, daddr, 0);
- else if (skb_dst(skb) && skb_dst(skb)->dev &&
- (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
-
- BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
-
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ uh->check = 0;
+ uh->check = udp_v6_check(len, saddr, daddr, lco_csum(skb));
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ } else if (skb_dst(skb) && skb_dst(skb)->dev &&
+ (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
} else {
__wsum csum;
- BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
-
uh->check = 0;
csum = skb_checksum(skb, 0, len, 0);
uh->check = udp_v6_check(len, saddr, daddr, csum);
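Not part of the patch: both udp_set_csum() and udp6_set_csum() lean on the same one's-complement property, that sums over disjoint, even-offset regions fold together in any order; this is what lets the pseudo-header sum, the outer headers, and the predicted inner sum be combined piecewise. A userspace demonstration with a toy csum()/fold() pair (both names invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* One's-complement sum over a byte region, big-endian word order */
static uint16_t csum(const uint8_t *p, size_t len, uint32_t sum)
{
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (p[i] << 8) | p[i + 1];
	if (len & 1)
		sum += p[len - 1] << 8;
	return fold(sum);
}

int main(void)
{
	uint8_t buf[12] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4, 5, 6, 7, 8 };

	/* Checksum of the whole buffer vs. folded sum of two halves
	 * split at an even offset: the results agree.
	 */
	uint16_t whole = csum(buf, 12, 0);
	uint16_t parts = fold(csum(buf, 6, 0) + csum(buf + 6, 6, 0));

	printf("whole=%04x parts=%04x\n", whole, parts);
	return 0;
}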