diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 8271b10bc30e4a5745ae61b9740579b299aa244f..f78a2ea07771ad2bffef1976f3f2193035dea094 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -115,7 +115,7 @@ struct Qdisc {
 	bool			empty;
 	struct rcu_head		rcu;
 
-	KABI_RESERVE(1)
+	KABI_USE(1, int owner)
 	KABI_RESERVE(2)
 
 	/* private data */
diff --git a/net/core/dev.c b/net/core/dev.c
index 0109bfccadb8a3676e6f7b0435207010c8b2c8e2..1cac2557f1b979a6b60b1120f3f389d244d99ca3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3785,6 +3785,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		return rc;
 	}
 
+	if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
 	 * separate lock before trying to get qdisc main lock.
@@ -3820,7 +3824,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		qdisc_run_end(q);
 		rc = NET_XMIT_SUCCESS;
 	} else {
+		WRITE_ONCE(q->owner, smp_processor_id());
 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+		WRITE_ONCE(q->owner, -1);
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ecdd9e83f2f49b874920b4891a0ff81277a7a815..ec55f306ab2ae93ce9afcbef3d16a109d55bc7f5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -592,6 +592,7 @@ struct Qdisc noop_qdisc = {
 		.qlen = 0,
 		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
 	},
+	.owner = -1,
 };
 EXPORT_SYMBOL(noop_qdisc);
 
@@ -899,6 +900,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
 	sch->empty = true;
+	sch->owner = -1;
 	dev_hold(dev);
 	refcount_set(&sch->refcnt, 1);
 
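
The sketch below is a minimal, stand-alone user-space illustration of the guard this patch adds to __dev_xmit_skb(): q->owner records which CPU is currently inside q->enqueue(), and a transmit that re-enters the same qdisc on that CPU (for example via a redirect loop back to the same device) is dropped instead of deadlocking on the qdisc locks. Everything in it (fake_qdisc, fake_skb, current_cpu, looping_enqueue) is a hypothetical stand-in, not kernel code, and the single-threaded "CPU id" only mimics smp_processor_id().

/*
 * Stand-alone sketch of the recursion guard, NOT part of the patch above.
 * All types and names here are illustrative assumptions.
 */
#include <stdio.h>

struct fake_skb { int id; };

struct fake_qdisc {
	int owner;				/* CPU currently inside enqueue, or -1 */
	int (*enqueue)(struct fake_skb *, struct fake_qdisc *);
};

static int current_cpu = 0;			/* stand-in for smp_processor_id() */

static int dev_xmit(struct fake_skb *skb, struct fake_qdisc *q);

/* An enqueue handler that re-enters transmit, emulating a redirect loop. */
static int looping_enqueue(struct fake_skb *skb, struct fake_qdisc *q)
{
	printf("enqueue skb %d, re-entering transmit\n", skb->id);
	return dev_xmit(skb, q);		/* would recurse forever without the guard */
}

static int dev_xmit(struct fake_skb *skb, struct fake_qdisc *q)
{
	/* Same CPU already owns this qdisc: drop instead of recursing/deadlocking. */
	if (q->owner == current_cpu) {
		printf("skb %d dropped: qdisc already owned by this CPU\n", skb->id);
		return -1;			/* analogue of NET_XMIT_DROP */
	}

	q->owner = current_cpu;			/* mark ownership around enqueue */
	int rc = q->enqueue(skb, q);
	q->owner = -1;				/* release ownership */
	return rc;
}

int main(void)
{
	struct fake_qdisc q = { .owner = -1, .enqueue = looping_enqueue };
	struct fake_skb skb = { .id = 1 };

	dev_xmit(&skb, &q);			/* recursion is cut after one level */
	return 0;
}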