sfc: implement counters readout to TC stats

On FLOW_CLS_STATS, look up the MAE counter by TC cookie, and report the
change in packet and byte count since the last time FLOW_CLS_STATS read
them.

Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Edward Cree, 2022-11-14 13:16:01 +00:00; committed by David S. Miller
parent 83a187a4eb
commit 50f8f2f7fb
3 changed files with 52 additions and 0 deletions
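
To illustrate the delta-reporting scheme described above: the driver keeps cumulative totals per MAE counter, but each FLOW_CLS_STATS query should hand TC only the increase since the previous query. Below is a minimal standalone sketch of that pattern, using hypothetical names rather than the driver's own structures.

/* Standalone sketch (hypothetical names, not driver code): keep running
 * totals plus a snapshot of the values reported last time; each stats
 * read returns only the delta and then advances the snapshot.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_counter {
	uint64_t packets, bytes;          /* cumulative totals from hardware */
	uint64_t old_packets, old_bytes;  /* values at the previous stats read */
};

static void demo_read_stats(struct demo_counter *cnt,
			    uint64_t *d_packets, uint64_t *d_bytes)
{
	*d_packets = cnt->packets - cnt->old_packets;
	*d_bytes = cnt->bytes - cnt->old_bytes;
	cnt->old_packets = cnt->packets;
	cnt->old_bytes = cnt->bytes;
}

int main(void)
{
	struct demo_counter cnt = { .packets = 10, .bytes = 1500 };
	uint64_t dp, db;

	demo_read_stats(&cnt, &dp, &db);
	printf("first read:  +%llu pkts, +%llu bytes\n",
	       (unsigned long long)dp, (unsigned long long)db);

	cnt.packets += 5;	/* more traffic arrives */
	cnt.bytes += 750;

	demo_read_stats(&cnt, &dp, &db);
	printf("second read: +%llu pkts, +%llu bytes\n",
	       (unsigned long long)dp, (unsigned long long)db);
	return 0;
}

The second read reports only the 5 packets and 750 bytes that arrived after the first read, which is what the patch feeds to flow_stats_update().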

@@ -595,6 +595,42 @@ static int efx_tc_flower_destroy(struct efx_nic *efx,
	return 0;
}

static int efx_tc_flower_stats(struct efx_nic *efx, struct net_device *net_dev,
			       struct flow_cls_offload *tc)
{
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_counter_index *ctr;
	struct efx_tc_counter *cnt;
	u64 packets, bytes;

	ctr = efx_tc_flower_find_counter_index(efx, tc->cookie);
	if (!ctr) {
		/* See comment in efx_tc_flower_destroy() */
		if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
			if (net_ratelimit())
				netif_warn(efx, drv, efx->net_dev,
					   "Filter %lx not found for stats\n",
					   tc->cookie);
		NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
		return -ENOENT;
	}
	if (WARN_ON(!ctr->cnt)) /* can't happen */
		return -EIO;
	cnt = ctr->cnt;
	spin_lock_bh(&cnt->lock);
	/* Report only new pkts/bytes since last time TC asked */
	packets = cnt->packets;
	bytes = cnt->bytes;
	flow_stats_update(&tc->stats, bytes - cnt->old_bytes,
			  packets - cnt->old_packets, 0, cnt->touched,
			  FLOW_ACTION_HW_STATS_DELAYED);
	cnt->old_packets = packets;
	cnt->old_bytes = bytes;
	spin_unlock_bh(&cnt->lock);
	return 0;
}
int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
		  struct flow_cls_offload *tc, struct efx_rep *efv)
{
@@ -611,6 +647,9 @@ int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
	case FLOW_CLS_DESTROY:
		rc = efx_tc_flower_destroy(efx, net_dev, tc);
		break;
	case FLOW_CLS_STATS:
		rc = efx_tc_flower_stats(efx, net_dev, tc);
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
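
efx_tc_flower_stats() holds cnt->lock across both reading the running totals and advancing the old_* snapshot, so a counter update arriving concurrently (they are delivered on the TC channel's RXQ) can neither be lost nor reported twice. Below is a userspace sketch of that read-and-snapshot-under-one-lock pattern, with hypothetical names and a pthread mutex standing in for the driver's spinlock.

/* Sketch only: hypothetical names, pthread mutex in place of the driver's
 * spinlock. An updater adds to the running totals while a reader reports
 * deltas; holding one lock across "read totals + advance snapshot" keeps
 * the two consistent.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct demo_counter {
	pthread_mutex_t lock;
	uint64_t packets, bytes;         /* running totals */
	uint64_t old_packets, old_bytes; /* snapshot at last stats read */
};

/* Counter-update path: add hardware-reported increments to the totals. */
static void demo_counter_update(struct demo_counter *cnt,
				uint64_t pkts, uint64_t byts)
{
	pthread_mutex_lock(&cnt->lock);
	cnt->packets += pkts;
	cnt->bytes += byts;
	pthread_mutex_unlock(&cnt->lock);
}

/* Stats path: report the delta since the last read, then move the snapshot. */
static void demo_counter_read(struct demo_counter *cnt,
			      uint64_t *d_pkts, uint64_t *d_byts)
{
	pthread_mutex_lock(&cnt->lock);
	*d_pkts = cnt->packets - cnt->old_packets;
	*d_byts = cnt->bytes - cnt->old_bytes;
	cnt->old_packets = cnt->packets;
	cnt->old_bytes = cnt->bytes;
	pthread_mutex_unlock(&cnt->lock);
}

int main(void)
{
	struct demo_counter cnt = { .lock = PTHREAD_MUTEX_INITIALIZER };
	uint64_t dp, db;

	demo_counter_update(&cnt, 3, 450);
	demo_counter_read(&cnt, &dp, &db);
	printf("delta: %llu pkts, %llu bytes\n",
	       (unsigned long long)dp, (unsigned long long)db);
	return 0;
}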

@@ -198,6 +198,16 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
	return ctr;
}

struct efx_tc_counter_index *efx_tc_flower_find_counter_index(
		struct efx_nic *efx, unsigned long cookie)
{
	struct efx_tc_counter_index key = {};

	key.cookie = cookie;
	return rhashtable_lookup_fast(&efx->tc->counter_id_ht, &key,
				      efx_tc_counter_id_ht_params);
}

/* TC Channel.  Counter updates are delivered on this channel's RXQ. */
static void efx_tc_handle_no_channel(struct efx_nic *efx)
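
The zeroed key with only .cookie filled in works because the counter_id_ht is keyed on the cookie field via its rhashtable_params. A hedged sketch of what such keying can look like follows; the struct layout and params below are hypothetical, not the driver's actual struct efx_tc_counter_index or efx_tc_counter_id_ht_params.

#include <linux/rhashtable.h>
#include <linux/stddef.h>

/* Hypothetical entry layout: a TC cookie as the key, rhashtable linkage,
 * and a pointer to the underlying MAE counter. The real struct
 * efx_tc_counter_index may differ.
 */
struct example_counter_index {
	unsigned long cookie;		/* lookup key: TC flower cookie */
	struct rhash_head linkage;	/* hash table linkage */
	struct efx_tc_counter *cnt;	/* counter this cookie maps to */
};

/* Params keyed on the cookie field: under these assumed params, a lookup
 * only needs key.cookie set because nothing else falls inside key_len.
 */
static const struct rhashtable_params example_counter_id_ht_params = {
	.key_len	= sizeof_field(struct example_counter_index, cookie),
	.key_offset	= offsetof(struct example_counter_index, cookie),
	.head_offset	= offsetof(struct example_counter_index, linkage),
};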

@@ -29,6 +29,7 @@ struct efx_tc_counter {
	spinlock_t lock; /* Serialises updates to counter values */
	u32 gen; /* Generation count at which this counter is current */
	u64 packets, bytes;
	u64 old_packets, old_bytes; /* Values last time passed to userspace */
	/* jiffies of the last time we saw packets increase */
	unsigned long touched;
};
@@ -50,6 +51,8 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
					enum efx_tc_counter_type type);
void efx_tc_flower_put_counter_index(struct efx_nic *efx,
				     struct efx_tc_counter_index *ctr);
struct efx_tc_counter_index *efx_tc_flower_find_counter_index(
		struct efx_nic *efx, unsigned long cookie);
extern const struct efx_channel_type efx_tc_channel_type;