From: Felix Fietkau
Date: Sun, 24 Mar 2019 17:04:49 +0000 (+0100)
Subject: kernel: use bulk free in kfree_skb_list to improve performance
X-Git-Url: http://git.cdn.openwrt.org/?a=commitdiff_plain;h=98b654de2e7502507b31f0fb82befbb48f9c8542;p=openwrt%2Fstaging%2Fjow.git

kernel: use bulk free in kfree_skb_list to improve performance

Signed-off-by: Felix Fietkau
---

diff --git a/target/linux/generic/pending-4.14/650-net-use-bulk-free-in-kfree_skb_list.patch b/target/linux/generic/pending-4.14/650-net-use-bulk-free-in-kfree_skb_list.patch
new file mode 100644
index 0000000000..1d1a6433d9
--- /dev/null
+++ b/target/linux/generic/pending-4.14/650-net-use-bulk-free-in-kfree_skb_list.patch
@@ -0,0 +1,61 @@
+From: Felix Fietkau
+Date: Sat, 23 Mar 2019 18:26:10 +0100
+Subject: [PATCH] net: use bulk free in kfree_skb_list
+
+Since we're freeing multiple skbs, we might as well use bulk free to save a
+few cycles. Use the same conditions for bulk free as in napi_consume_skb.
+
+Signed-off-by: Felix Fietkau
+---
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -666,12 +666,44 @@ EXPORT_SYMBOL(kfree_skb);
+ 
+ void kfree_skb_list(struct sk_buff *segs)
+ {
+-	while (segs) {
+-		struct sk_buff *next = segs->next;
++	struct sk_buff *next = segs;
++	void *skbs[16];
++	int n_skbs = 0;
+ 
+-		kfree_skb(segs);
+-		segs = next;
++	while ((segs = next) != NULL) {
++		next = segs->next;
++
++		if (segs->fclone != SKB_FCLONE_UNAVAILABLE) {
++			kfree_skb(segs);
++			continue;
++		}
++
++		if (!skb_unref(segs))
++			continue;
++
++		trace_kfree_skb(segs, __builtin_return_address(0));
++
++		/* drop skb->head and call any destructors for packet */
++		skb_release_all(segs);
++
++#ifdef CONFIG_SLUB
++		/* SLUB writes into objects when freeing */
++		prefetchw(segs);
++#endif
++
++		skbs[n_skbs++] = segs;
++
++		if (n_skbs < ARRAY_SIZE(skbs))
++			continue;
++
++		kmem_cache_free_bulk(skbuff_head_cache, n_skbs, skbs);
++		n_skbs = 0;
+ 	}
++
++	if (!n_skbs)
++		return;
++
++	kmem_cache_free_bulk(skbuff_head_cache, n_skbs, skbs);
+ }
+ EXPORT_SYMBOL(kfree_skb_list);
+ 
diff --git a/target/linux/generic/pending-4.19/650-net-use-bulk-free-in-kfree_skb_list.patch b/target/linux/generic/pending-4.19/650-net-use-bulk-free-in-kfree_skb_list.patch
new file mode 100644
index 0000000000..1d1a6433d9
--- /dev/null
+++ b/target/linux/generic/pending-4.19/650-net-use-bulk-free-in-kfree_skb_list.patch
@@ -0,0 +1,61 @@
+From: Felix Fietkau
+Date: Sat, 23 Mar 2019 18:26:10 +0100
+Subject: [PATCH] net: use bulk free in kfree_skb_list
+
+Since we're freeing multiple skbs, we might as well use bulk free to save a
+few cycles. Use the same conditions for bulk free as in napi_consume_skb.
+
+Signed-off-by: Felix Fietkau
+---
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -666,12 +666,44 @@ EXPORT_SYMBOL(kfree_skb);
+ 
+ void kfree_skb_list(struct sk_buff *segs)
+ {
+-	while (segs) {
+-		struct sk_buff *next = segs->next;
++	struct sk_buff *next = segs;
++	void *skbs[16];
++	int n_skbs = 0;
+ 
+-		kfree_skb(segs);
+-		segs = next;
++	while ((segs = next) != NULL) {
++		next = segs->next;
++
++		if (segs->fclone != SKB_FCLONE_UNAVAILABLE) {
++			kfree_skb(segs);
++			continue;
++		}
++
++		if (!skb_unref(segs))
++			continue;
++
++		trace_kfree_skb(segs, __builtin_return_address(0));
++
++		/* drop skb->head and call any destructors for packet */
++		skb_release_all(segs);
++
++#ifdef CONFIG_SLUB
++		/* SLUB writes into objects when freeing */
++		prefetchw(segs);
++#endif
++
++		skbs[n_skbs++] = segs;
++
++		if (n_skbs < ARRAY_SIZE(skbs))
++			continue;
++
++		kmem_cache_free_bulk(skbuff_head_cache, n_skbs, skbs);
++		n_skbs = 0;
+ 	}
++
++	if (!n_skbs)
++		return;
++
++	kmem_cache_free_bulk(skbuff_head_cache, n_skbs, skbs);
+ }
+ EXPORT_SYMBOL(kfree_skb_list);
+ 
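
Note: the batching pattern used by the patch above can be illustrated outside the kernel. The following is a minimal userspace C sketch of the same idea, freeing a linked list in batches of 16 instead of one element at a time; free() stands in for kmem_cache_free_bulk(), and struct node, BATCH_SIZE and free_node_list() are illustrative names, not part of the kernel patch.

/*
 * Userspace sketch of the batching pattern from the patch above:
 * pointers are collected into a small on-stack array and released
 * in batches rather than one at a time.
 */
#include <stdlib.h>
#include <stddef.h>

struct node {
	struct node *next;
	/* payload omitted */
};

#define BATCH_SIZE 16

static void free_batch(void **ptrs, size_t n)
{
	/* the kernel patch calls kmem_cache_free_bulk() here */
	for (size_t i = 0; i < n; i++)
		free(ptrs[i]);
}

void free_node_list(struct node *head)
{
	void *batch[BATCH_SIZE];
	size_t n = 0;
	struct node *next = head;

	while ((head = next) != NULL) {
		next = head->next;

		batch[n++] = head;
		if (n < BATCH_SIZE)
			continue;

		/* array is full: flush the batch and start over */
		free_batch(batch, n);
		n = 0;
	}

	/* flush whatever is left after the loop */
	if (n)
		free_batch(batch, n);
}

As in kfree_skb_list(), objects that cannot go through the bulk path (fclones, skbs whose refcount has not dropped to zero) simply take the existing per-object path, and the on-stack array keeps the batch bookkeeping allocation-free.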