net/sched: add per-qdisc owner tracking to break device-recursion deadlock

When an action (e.g. mirred) redirects a packet back to a device whose
qdisc the current CPU is already enqueuing into, __dev_xmit_skb() can
self-deadlock on the qdisc locks. Record the enqueuing CPU in a new
Qdisc::owner field (placed in a KABI reserve slot to preserve the ABI),
and drop the skb early if the current CPU observes itself as the owner.
owner is -1 when no CPU is inside ->enqueue(); READ_ONCE/WRITE_ONCE are
used because the lockless read races with the writer by design — a false
negative is harmless, an exact match can only be a true recursion.
Backport note: uses kfree_skb() where upstream uses
kfree_skb_reason(..., SKB_DROP_REASON_TC_RECLASSIFY_LOOP).

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index fad29c9961cca5623e043132752cab6a82cbb9db..efacdfee3702f28f7b7ca8e0d1374ebe8818e0dc 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -115,7 +115,7 @@ struct Qdisc {
 	bool			empty;
 	struct rcu_head		rcu;
 
-	KABI_RESERVE(1)
+	KABI_USE(1, int owner)
 	KABI_RESERVE(2)
 
 	/* private data */
diff --git a/net/core/dev.c b/net/core/dev.c
index 72784077f0ccaa14b2804fcce9b7ac8abcf7eaa0..9ad27796abed34f13e844e60d43af24e70be53dd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3789,6 +3789,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		return rc;
 	}
 
+	if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
 	 * separate lock before trying to get qdisc main lock.
@@ -3824,7 +3828,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		qdisc_run_end(q);
 		rc = NET_XMIT_SUCCESS;
 	} else {
+		WRITE_ONCE(q->owner, smp_processor_id());
 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+		WRITE_ONCE(q->owner, -1);
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ecdd9e83f2f49b874920b4891a0ff81277a7a815..a3e9dd348500beeae94bb68c8a7ed9f735781843 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -899,6 +899,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
 	sch->empty = true;
+	sch->owner = -1;
 	dev_hold(dev);
 	refcount_set(&sch->refcnt, 1);
 