29cbcd8582
The Qdisc::running sequence counter has two uses:

  1. Reliably reading qdisc's tc statistics while the qdisc is running
     (a seqcount read/retry loop at gnet_stats_add_basic()).

  2. As a flag, indicating whether the qdisc in question is running
     (without any retry loops).

For the first usage, the Qdisc::running sequence counter write section,
qdisc_run_begin() => qdisc_run_end(), covers a much wider area than what
is actually needed: the raw qdisc's bstats update. A u64_stats sync
point was thus introduced (in previous commits) inside the bstats
structure itself. A local u64_stats write section is then started and
stopped for the bstats updates.

Use that u64_stats sync point mechanism for the bstats read/retry loop
at gnet_stats_add_basic().

For the second qdisc->running usage, a __QDISC_STATE_RUNNING bit flag,
accessed with atomic bitops, is sufficient. Using a bit flag instead of
a sequence counter at qdisc_run_begin/end() and qdisc_is_running() leads
to the SMP barriers implicitly added through raw_read_seqcount() and
write_seqcount_begin/end() getting removed. All call sites have been
surveyed though, and no required ordering was identified.

Now that the qdisc->running sequence counter is no longer used, remove
it.

Note, using u64_stats implies no sequence counter protection for 64-bit
architectures. This can lead to the qdisc tc statistics "packets" vs.
"bytes" values getting out of sync on rare occasions. The individual
values will still be valid.

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
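The two replacement mechanisms are easiest to see side by side. The
sketch below is illustrative kernel-style C, not the literal patch: it
uses the real u64_stats_* and atomic bitop APIs, but read_bstats() and
the my_qdisc_*() helpers are hypothetical names, and the Qdisc layout
is simplified here to a single "state" bitmap.

#include <linux/u64_stats_sync.h>
#include <net/gen_stats.h>
#include <net/sch_generic.h>

/* (1) Reading tc statistics: retry against the bstats-local u64_stats
 * sync point instead of the qdisc-wide Qdisc::running seqcount. On
 * 64-bit kernels the fetch/retry pair compiles away, which is why
 * "bytes" and "packets" can get mutually out of sync there on rare
 * occasions while each value stays individually valid.
 */
static void read_bstats(const struct gnet_stats_basic_sync *b,
			u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&b->syncp);
		*bytes   = u64_stats_read(&b->bytes);
		*packets = u64_stats_read(&b->packets);
	} while (u64_stats_fetch_retry(&b->syncp, start));
}

/* (2) "Is the qdisc running?": an atomic bit flag is enough, since the
 * call-site survey found no ordering that depended on the seqcount's
 * implicit SMP barriers. A true return from test_and_set_bit() means
 * another CPU already owns the run state.
 */
static bool my_qdisc_run_begin(struct Qdisc *qdisc)
{
	return !test_and_set_bit(__QDISC_STATE_RUNNING, &qdisc->state);
}

static void my_qdisc_run_end(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_RUNNING, &qdisc->state);
}

static bool my_qdisc_is_running(const struct Qdisc *qdisc)
{
	return test_bit(__QDISC_STATE_RUNNING, &qdisc->state);
}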
85 lines
3.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_GEN_STATS_H
#define __NET_GEN_STATS_H

#include <linux/gen_stats.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>

/* Throughput stats.
 * Must be initialized beforehand with gnet_stats_basic_sync_init().
 *
 * If no reads can ever occur parallel to writes (e.g. stack-allocated
 * bstats), then the internal stat values can be written to and read
 * from directly. Otherwise, use _bstats_set/update() for writes and
 * gnet_stats_add_basic() for reads.
 */
struct gnet_stats_basic_sync {
	u64_stats_t bytes;
	u64_stats_t packets;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct net_rate_estimator;

struct gnet_dump {
	spinlock_t *      lock;
	struct sk_buff *  skb;
	struct nlattr *   tail;

	/* Backward compatibility */
	int               compat_tc_stats;
	int               compat_xstats;
	int               padattr;
	void *            xstats;
	int               xstats_len;
	struct tc_stats   tc_stats;
};

void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
			  struct gnet_dump *d, int padattr);

int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
				 int tc_stats_type, int xstats_type,
				 spinlock_t *lock, struct gnet_dump *d,
				 int padattr);

int gnet_stats_copy_basic(struct gnet_dump *d,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running);
void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_basic_hw(struct gnet_dump *d,
			     struct gnet_stats_basic_sync __percpu *cpu,
			     struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
			     struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
			  struct gnet_stats_queue __percpu *cpu_q,
			  struct gnet_stats_queue *q, __u32 qlen);
void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
			  const struct gnet_stats_queue __percpu *cpu_q,
			  const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

int gnet_stats_finish_copy(struct gnet_dump *d);

int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
		      struct gnet_stats_basic_sync __percpu *cpu_bstats,
		      struct net_rate_estimator __rcu **rate_est,
		      spinlock_t *lock,
		      bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu_bstats,
			  struct net_rate_estimator __rcu **ptr,
			  spinlock_t *lock,
			  bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
			struct gnet_stats_rate_est64 *sample);
#endif
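A usage sketch for the comment above struct gnet_stats_basic_sync: one
u64_stats write section per counter update on the write side, and
gnet_stats_add_basic() to fold the counters into an aggregate on the
read side. This assumes _bstats_update() from <net/sch_generic.h>;
my_bstats, my_setup(), my_xmit() and my_collect() are hypothetical
names, not part of this header.

#include <net/gen_stats.h>
#include <net/sch_generic.h>

static struct gnet_stats_basic_sync my_bstats;

static void my_setup(void)
{
	/* Zero the counters and initialize syncp, which is a real
	 * seqcount only on 32-bit SMP configurations. */
	gnet_stats_basic_sync_init(&my_bstats);
}

static void my_xmit(unsigned int len)
{
	/* Write side: _bstats_update() wraps the adds in
	 * u64_stats_update_begin()/u64_stats_update_end(). */
	_bstats_update(&my_bstats, len, 1);
}

static void my_collect(struct gnet_stats_basic_sync *total)
{
	/* Read side: no per-CPU copy here, so cpu == NULL; passing
	 * running == false is an assumption for this private counter. */
	gnet_stats_add_basic(total, NULL, &my_bstats, false);
}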