path: root/net/sched/sch_hhf.c
diff options
author    David S. Miller <davem@davemloft.net>  2016-06-15 14:08:36 -0700
committer David S. Miller <davem@davemloft.net>  2016-06-15 14:08:36 -0700
commit    88da48f4977a4b31e03cac1b6ea38f24d7916d10 (patch)
tree      462e30cb64cf066c1d13cd98eb0ea502115bafda /net/sched/sch_hhf.c
parent    35c55c9877f8de0ab129fa1a309271d0ecc868b9 (diff)
parent    fea024784f588a1c50e7718d6053697ebdcc033e (diff)
Merge branch 'sched_skb_free_defer'
Eric Dumazet says: ==================== net_sched: defer skb freeing while changing qdiscs qdiscs/classes are changed under RTNL protection and often while blocking BH and root qdisc spinlock. When lots of skbs need to be dropped, we free them under these locks causing TX/RX freezes, and more generally latency spikes. I saw spikes of 50+ ms on quite fast hardware... This patch series adds a simple queue protected by RTNL where skbs can be placed until RTNL is released. Note that this might also serve in the future for optional reinjection of packets when a qdisc is replaced. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_hhf.c')
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index c51791848a38..c44593b8e65a 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -464,7 +464,7 @@ static void hhf_reset(struct Qdisc *sch)
struct sk_buff *skb;
while ((skb = hhf_dequeue(sch)) != NULL)
- kfree_skb(skb);
+ rtnl_kfree_skbs(skb, skb);
static void *hhf_zalloc(size_t sz)
@@ -574,7 +574,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = hhf_dequeue(sch);
- kfree_skb(skb);
+ rtnl_kfree_skbs(skb, skb);
qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
prev_backlog - sch->qstats.backlog);