diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 138a72274aa673daee1c763466bbd3b40080f31b..da856aa15aa8e03b7ea638f48ec78562d84afcd8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -128,7 +128,7 @@ struct Qdisc {
 	struct rcu_head		rcu;
 	netdevice_tracker	dev_tracker;
 
-	KABI_RESERVE(1)
+	KABI_USE(1, int owner)
 	KABI_RESERVE(2)
 
 	/* private data */
diff --git a/net/core/dev.c b/net/core/dev.c
index 1f6c8945f2ecac5d13dc375968265d0a09dfd8a4..1900cf08b804d7f92a8a56a14a836ac7ed870218 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3818,6 +3818,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		return rc;
 	}
 
+	if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
+		kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
+		return NET_XMIT_DROP;
+	}
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
 	 * separate lock before trying to get qdisc main lock.
@@ -3857,7 +3861,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		qdisc_run_end(q);
 		rc = NET_XMIT_SUCCESS;
 	} else {
+		WRITE_ONCE(q->owner, smp_processor_id());
 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
+		WRITE_ONCE(q->owner, -1);
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5d7e23f4cc0ee4c8c2c39cf10405f56fb6f0bfe1..fa34300915a8b7eea0b902e77948df242ba70eba 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -971,6 +971,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
+	sch->owner = -1;
 	netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
 	refcount_set(&sch->refcnt, 1);
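
Reviewer note on the mechanism (my reading of the hunks above, not part of the patch): qdisc_alloc() initializes q->owner to -1, __dev_xmit_skb() publishes the current CPU id in q->owner around the dev_qdisc_enqueue() call, and a packet that re-enters the same qdisc on the same CPU (e.g. a tc redirect loop feeding back into the same device chain) is detected and dropped with SKB_DROP_REASON_TC_RECLASSIFY_LOOP rather than recursing into the qdisc locks. The KABI_USE(1, int owner) hunk places the new field in a kABI reserve slot so the struct Qdisc layout stays compatible. The standalone user-space sketch below only illustrates the same owner/reentrancy-guard pattern; fake_qdisc, this_cpu() and the recursive enqueue() are hypothetical stand-ins for struct Qdisc, smp_processor_id() and __dev_xmit_skb(), not kernel code.

/*
 * Standalone sketch of the reentrancy guard: the "owner" field records
 * which CPU is currently enqueuing, so a recursive enqueue from the same
 * CPU is detected and the packet is dropped instead of re-entering.
 */
#include <stdio.h>

struct fake_qdisc {
	int owner;		/* CPU currently enqueuing, -1 if none */
};

static int this_cpu(void)
{
	return 0;		/* stand-in for smp_processor_id() */
}

/* Returns 0 on success, -1 when a same-CPU recursion is detected. */
static int enqueue(struct fake_qdisc *q, const char *pkt, int depth)
{
	if (q->owner == this_cpu()) {
		printf("drop %s: recursion detected\n", pkt);
		return -1;
	}

	q->owner = this_cpu();
	printf("enqueue %s\n", pkt);
	if (depth)
		enqueue(q, pkt, depth - 1);	/* simulated redirect loop */
	q->owner = -1;
	return 0;
}

int main(void)
{
	struct fake_qdisc q = { .owner = -1 };

	enqueue(&q, "skb0", 0);	/* normal path */
	enqueue(&q, "skb1", 1);	/* looping path: inner call is dropped */
	return 0;
}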