xsk: Add new statistics
It can be useful for the user to know the reason behind a dropped packet. Introduce new counters which track drops on the receive path caused by:
  1. rx ring being full
  2. fill ring being empty

Also, on the tx path introduce a counter which tracks the number of times we attempt to pull from the tx ring when it is empty.

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200708072835.4427-2-ciara.loftus@intel.com
parent 24a38b7c0c
commit 8aa5a33578
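As a minimal userspace sketch (not part of this commit), the counters below can be read through the existing XDP_STATISTICS getsockopt on an AF_XDP socket. Here "fd" is assumed to be an already-created AF_XDP socket and the kernel is assumed to carry this patch; the returned optlen tells the caller whether the three new fields were filled in.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Print the AF_XDP statistics for an already-created XSK socket "fd". */
static int dump_xsk_stats(int fd)
{
	struct xdp_statistics stats;
	socklen_t optlen = sizeof(stats);

	memset(&stats, 0, sizeof(stats));
	if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
		return -1;

	printf("rx_dropped:               %llu\n",
	       (unsigned long long)stats.rx_dropped);
	printf("rx_invalid_descs:         %llu\n",
	       (unsigned long long)stats.rx_invalid_descs);
	printf("tx_invalid_descs:         %llu\n",
	       (unsigned long long)stats.tx_invalid_descs);

	/* The kernel reports back how much it copied; a pre-patch kernel
	 * only fills in the three original counters. */
	if (optlen >= sizeof(stats)) {
		printf("rx_ring_full:             %llu\n",
		       (unsigned long long)stats.rx_ring_full);
		printf("rx_fill_ring_empty_descs: %llu\n",
		       (unsigned long long)stats.rx_fill_ring_empty_descs);
		printf("tx_ring_empty_descs:      %llu\n",
		       (unsigned long long)stats.tx_ring_empty_descs);
	}
	return 0;
}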
@@ -69,7 +69,11 @@ struct xdp_sock {
 	spinlock_t tx_completion_lock;
 	/* Protects generic receive. */
 	spinlock_t rx_lock;
+
+	/* Statistics */
 	u64 rx_dropped;
+	u64 rx_queue_full;
+
 	struct list_head map_list;
 	/* Protects map_list */
 	spinlock_t map_list_lock;
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
 };
 
 struct xdp_statistics {
-	__u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+	__u64 rx_dropped; /* Dropped for other reasons */
 	__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
 	__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+	__u64 rx_ring_full; /* Dropped due to rx ring being full */
+	__u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+	__u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
 };
 
 struct xdp_options {
@@ -123,7 +123,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 	addr = xp_get_handle(xskb);
 	err = xskq_prod_reserve_desc(xs->rx, addr, len);
 	if (err) {
-		xs->rx_dropped++;
+		xs->rx_queue_full++;
 		return err;
 	}
 
@@ -274,8 +274,10 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
-		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
+		if (!xskq_cons_peek_desc(xs->tx, desc, umem)) {
+			xs->tx->queue_empty_descs++;
 			continue;
+		}
 
 		/* This is the backpressure mechanism for the Tx path.
 		 * Reserve space in the completion queue and only proceed
@@ -387,6 +389,8 @@ static int xsk_generic_xmit(struct sock *sk)
 		sent_frame = true;
 	}
 
+	xs->tx->queue_empty_descs++;
+
 out:
 	if (sent_frame)
 		sk->sk_write_space(sk);
@@ -812,6 +816,12 @@ static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
 	ring->desc = offsetof(struct xdp_umem_ring, desc);
 }
 
+struct xdp_statistics_v1 {
+	__u64 rx_dropped;
+	__u64 rx_invalid_descs;
+	__u64 tx_invalid_descs;
+};
+
 static int xsk_getsockopt(struct socket *sock, int level, int optname,
 			  char __user *optval, int __user *optlen)
 {
@@ -831,19 +841,35 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname,
 	case XDP_STATISTICS:
 	{
 		struct xdp_statistics stats;
+		bool extra_stats = true;
+		size_t stats_size;
 
-		if (len < sizeof(stats))
+		if (len < sizeof(struct xdp_statistics_v1)) {
 			return -EINVAL;
+		} else if (len < sizeof(stats)) {
+			extra_stats = false;
+			stats_size = sizeof(struct xdp_statistics_v1);
+		} else {
+			stats_size = sizeof(stats);
+		}
 
 		mutex_lock(&xs->mutex);
 		stats.rx_dropped = xs->rx_dropped;
+		if (extra_stats) {
+			stats.rx_ring_full = xs->rx_queue_full;
+			stats.rx_fill_ring_empty_descs =
+				xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
+		} else {
+			stats.rx_dropped += xs->rx_queue_full;
+		}
 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
 		mutex_unlock(&xs->mutex);
 
-		if (copy_to_user(optval, &stats, sizeof(stats)))
+		if (copy_to_user(optval, &stats, stats_size))
 			return -EFAULT;
-		if (put_user(sizeof(stats), optlen))
+		if (put_user(stats_size, optlen))
 			return -EFAULT;
 
 		return 0;
@@ -235,6 +235,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 
 	for (;;) {
 		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
+			pool->fq->queue_empty_descs++;
 			xp_release(xskb);
 			return NULL;
 		}
@@ -38,6 +38,7 @@ struct xsk_queue {
 	u32 cached_cons;
 	struct xdp_ring *ring;
 	u64 invalid_descs;
+	u64 queue_empty_descs;
 };
 
 /* The structure of the shared state of the rings are the same as the
/* The structure of the shared state of the rings are the same as the
|
||||
@ -354,6 +355,11 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
|
||||
return q ? q->invalid_descs : 0;
|
||||
}
|
||||
|
||||
static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
|
||||
{
|
||||
return q ? q->queue_empty_descs : 0;
|
||||
}
|
||||
|
||||
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
|
||||
void xskq_destroy(struct xsk_queue *q_ops);
|
||||
|
||||
|
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
 };
 
 struct xdp_statistics {
-	__u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+	__u64 rx_dropped; /* Dropped for other reasons */
 	__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
 	__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+	__u64 rx_ring_full; /* Dropped due to rx ring being full */
+	__u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+	__u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
 };
 
 struct xdp_options {
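The size negotiation added to xsk_getsockopt() above keeps the old ABI working: a binary built against the original three-field layout passes a shorter length, the kernel copies only that much and folds rx_queue_full into rx_dropped, so no receive drop goes unreported. A rough sketch of that legacy call path from userspace, assuming "fd" is an AF_XDP socket (the local struct mirrors the kernel's xdp_statistics_v1 and is illustrative only):

#include <sys/socket.h>
#include <linux/if_xdp.h>
#include <linux/types.h>

/* Mirror of the original three-field layout (xdp_statistics_v1 above). */
struct xsk_stats_legacy {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int read_legacy_xsk_stats(int fd, struct xsk_stats_legacy *out)
{
	socklen_t optlen = sizeof(*out);

	/* Lengths below sizeof(xdp_statistics_v1) get -EINVAL; this length
	 * succeeds, rx_queue_full is added into rx_dropped and optlen comes
	 * back as sizeof(*out). */
	return getsockopt(fd, SOL_XDP, XDP_STATISTICS, out, &optlen);
}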