Merge branch 'net-dev-BYPASS-for-lockless-qdisc'
Paolo Abeni says:

====================
net: dev: BYPASS for lockless qdisc

This patch series is aimed at improving xmit performance of lockless qdiscs in the uncontended scenario.

After the lockless refactor, pfifo_fast can no longer leverage the BYPASS optimization. Due to retpolines, the overhead of the (avoidable) enqueue and dequeue operations has increased, and we see measurable regressions.

The first patch introduces the BYPASS code path for lockless qdiscs, and the second one optimizes that path further. Overall this avoids up to 3 indirect calls per xmit packet. Detailed performance figures are reported in the 2nd patch.

v2 -> v3:
 - qdisc_is_empty() takes a const argument (Eric)

v1 -> v2:
 - use an 'empty' flag instead of 'not_empty', as suggested by Eric
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7c1508e5f6
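Before the hunks themselves, a minimal, userspace-only C sketch of the decision the series adds to the transmit path may help: when the qdisc advertises itself as empty and its "running" lock can be grabbed without contention, the packet is handed straight to the driver, skipping the enqueue()/dequeue() indirect calls. The toy_* names and the pthread mutex are stand-ins invented for this illustration; the real kernel code follows in the diff below.

```c
/*
 * Toy model of the BYPASS fast path, using invented toy_* names.
 * It only mirrors the control flow; it is not the kernel implementation.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_qdisc {
	pthread_mutex_t seqlock;   /* stand-in for qdisc->seqlock */
	bool empty;                /* stand-in for qdisc->empty */
	int backlog;               /* packets sitting in the queue */
};

static bool toy_run_begin(struct toy_qdisc *q)
{
	if (pthread_mutex_trylock(&q->seqlock))
		return false;      /* someone else is already transmitting */
	q->empty = false;          /* we are about to handle a packet */
	return true;
}

static void toy_run_end(struct toy_qdisc *q)
{
	pthread_mutex_unlock(&q->seqlock);
}

static void toy_xmit(struct toy_qdisc *q, int pkt)
{
	if (q->empty && toy_run_begin(q)) {
		/* BYPASS: send directly, no enqueue/dequeue indirect calls */
		printf("pkt %d: bypass, transmitted directly\n", pkt);
		toy_run_end(q);
		return;
	}
	/* Slow path: enqueue and let the lock owner drain the queue */
	q->backlog++;
	printf("pkt %d: enqueued (backlog=%d)\n", pkt, q->backlog);
}

int main(void)
{
	struct toy_qdisc q = {
		.seqlock = PTHREAD_MUTEX_INITIALIZER,
		.empty = true,     /* a fresh qdisc has nothing queued */
	};

	toy_xmit(&q, 1);  /* empty + uncontended -> bypass */
	toy_xmit(&q, 2);  /* flag already cleared -> slow path; the real
	                   * series raises it again only when a dequeue
	                   * pass finds the queue drained */
	return 0;
}
```

Built with `cc -pthread`, this prints one direct transmission followed by one enqueue, matching the two branches added to __dev_xmit_skb() in the diff below.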
@@ -113,6 +113,9 @@ struct Qdisc {
 	spinlock_t		busylock ____cacheline_aligned_in_smp;
 	spinlock_t		seqlock;
+
+	/* for NOLOCK qdisc, true if there are no enqueued skbs */
+	bool			empty;
 	struct rcu_head		rcu;
 };
@@ -143,11 +146,19 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
 	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 }
 
+static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+{
+	if (qdisc->flags & TCQ_F_NOLOCK)
+		return qdisc->empty;
+	return !qdisc->q.qlen;
+}
+
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
 	if (qdisc->flags & TCQ_F_NOLOCK) {
 		if (!spin_trylock(&qdisc->seqlock))
 			return false;
+		qdisc->empty = false;
 	} else if (qdisc_is_running(qdisc)) {
 		return false;
 	}
@@ -3468,6 +3468,15 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
 		__qdisc_drop(skb, &to_free);
 		rc = NET_XMIT_DROP;
+	} else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+		   qdisc_run_begin(q)) {
+		qdisc_bstats_cpu_update(q, skb);
+
+		if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
+			__qdisc_run(q);
+
+		qdisc_run_end(q);
+		rc = NET_XMIT_SUCCESS;
 	} else {
 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
 		qdisc_run(q);
@@ -671,6 +671,8 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
 		qdisc_bstats_cpu_update(qdisc, skb);
 		qdisc_qstats_atomic_qlen_dec(qdisc);
+	} else {
+		qdisc->empty = true;
 	}
 
 	return skb;
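Taken together, the qdisc_run_begin() and pfifo_fast_dequeue() hunks above define the lifecycle of the new empty flag: it is cleared as soon as a sender wins the seqlock, and it is raised again only when a dequeue pass finds nothing left in the queue. The following single-threaded sketch, again with invented toy_* names, walks through those transitions; it is an illustration of the idea, not the kernel code.

```c
/* Single-threaded model of the 'empty' flag transitions (toy_* names invented). */
#include <assert.h>
#include <stdbool.h>

struct toy_qdisc {
	bool running;   /* stands in for holding qdisc->seqlock */
	bool empty;     /* mirrors qdisc->empty */
	int backlog;    /* queued packets */
};

/* Like qdisc_run_begin(): take ownership and mark the qdisc non-empty. */
static bool toy_run_begin(struct toy_qdisc *q)
{
	if (q->running)
		return false;
	q->running = true;
	q->empty = false;
	return true;
}

static void toy_run_end(struct toy_qdisc *q)
{
	q->running = false;
}

/* Like pfifo_fast_dequeue(): only an empty dequeue raises the flag again. */
static int toy_dequeue(struct toy_qdisc *q)
{
	if (q->backlog > 0)
		return q->backlog--;   /* a "packet" was found, flag stays low */
	q->empty = true;               /* queue drained */
	return 0;
}

int main(void)
{
	struct toy_qdisc q = { .empty = true };  /* qdisc_alloc() analogue */

	assert(toy_run_begin(&q) && !q.empty);   /* sender won the lock */
	q.backlog = 1;                           /* one packet was enqueued */
	toy_dequeue(&q);                         /* packet handled */
	assert(!q.empty);                        /* flag still low */
	toy_dequeue(&q);                         /* nothing left */
	assert(q.empty);                         /* flag raised again */
	toy_run_end(&q);
	return 0;
}
```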
@@ -880,6 +882,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
+	sch->empty = true;
 	dev_hold(dev);
 	refcount_set(&sch->refcnt, 1);