Merge branch 'nexthop-grp-stats' into next

Petr Machata says:

====================

Next hop group stats allow verification of balancedness of a next hop
group. The feature was merged in kernel commit 7cf497e5a122 ("Merge branch
'nexthop-group-stats'"). This patchset adds to ip the corresponding
support.

NH group stats come in two flavors: as statistics for SW and for HW
datapaths. The former is shown when -s is given to "ip nexthop". The latter
demands more work from the kernel, and possibly driver and HW, and might
not be always necessary. Therefore tie it to -s -s, similarly to how ip
link shows more detailed stats when -s is given twice.

Here's an example usage:

 # ip link add name gre1 up type gre \
      local 172.16.1.1 remote 172.16.1.2 tos inherit
 # ip nexthop replace id 1001 dev gre1
 # ip nexthop replace id 1002 dev gre1
 # ip nexthop replace id 1111 group 1001/1002 hw_stats on
 # ip -s -s -j -p nexthop show id 1111
 [ {
 	[ ...snip... ]
         "hw_stats": {
             "enabled": true,
             "used": true
         },
         "group_stats": [ {
                 "id": 1001,
                 "packets": 0,
                 "packets_hw": 0
             },{
                 "id": 1002,
                 "packets": 0,
                 "packets_hw": 0
             } ]
     } ]

hw_stats.enabled shows whether hw_stats have been requested for the given
group. hw_stats.used shows whether any driver actually implemented the
counter. group_stats[].packets shows the total packet count for each member;
packets_hw shows only the HW-datapath portion of that count.

====================

Signed-off-by: David Ahern <dsahern@kernel.org>
This commit is contained in:
David Ahern 2024-03-15 15:05:23 +00:00
commit 7a6d30c95d
4 changed files with 156 additions and 0 deletions

View File

@ -260,6 +260,20 @@ static inline __u64 rta_getattr_u64(const struct rtattr *rta)
memcpy(&tmp, RTA_DATA(rta), sizeof(__u64));
return tmp;
}
/* Fetch an integer netlink attribute of unspecified width: accept an
 * 8-, 16-, 32- or 64-bit payload and widen the value to 64 bits.
 * Any other payload size yields -1ULL.
 */
static inline __u64 rta_getattr_uint(const struct rtattr *rta)
{
	size_t len = RTA_PAYLOAD(rta);

	if (len == sizeof(__u8))
		return rta_getattr_u8(rta);
	if (len == sizeof(__u16))
		return rta_getattr_u16(rta);
	if (len == sizeof(__u32))
		return rta_getattr_u32(rta);
	if (len == sizeof(__u64))
		return rta_getattr_u64(rta);
	return -1ULL;
}
static inline __s32 rta_getattr_s32(const struct rtattr *rta)
{
return *(__s32 *)RTA_DATA(rta);

View File

@ -25,6 +25,7 @@ static struct {
unsigned int fdb;
unsigned int id;
unsigned int nhid;
unsigned int op_flags;
} filter;
enum {
@ -55,6 +56,7 @@ static void usage(void)
" [ encap ENCAPTYPE ENCAPHDR ] |\n"
" group GROUP [ fdb ] [ type TYPE [ TYPE_ARGS ] ] }\n"
"GROUP := [ <id[,weight]>/<id[,weight]>/... ]\n"
" [ hw_stats {off|on} ]\n"
"TYPE := { mpath | resilient }\n"
"TYPE_ARGS := [ RESILIENT_ARGS ]\n"
"RESILIENT_ARGS := [ buckets BUCKETS ] [ idle_timer IDLE ]\n"
@ -92,6 +94,14 @@ static int nh_dump_filter(struct nlmsghdr *nlh, int reqlen)
return err;
}
if (filter.op_flags) {
__u32 op_flags = filter.op_flags;
err = addattr32(nlh, reqlen, NHA_OP_FLAGS, op_flags);
if (err)
return err;
}
return 0;
}
@ -296,6 +306,36 @@ static void parse_nh_res_group_rta(const struct rtattr *res_grp_attr,
}
}
/* Parse NHA_GROUP_STATS: a nest of per-member NHA_GROUP_STATS_ENTRY
 * nests, each carrying the member nexthop ID and its packet counters.
 * Fills nhe->nh_grp_stats, which the caller has already allocated with
 * nhe->nh_groups_cnt elements.
 */
static void parse_nh_group_stats_rta(const struct rtattr *grp_stats_attr,
				     struct nh_entry *nhe)
{
	const struct rtattr *pos;
	int i = 0;

	rtattr_for_each_nested(pos, grp_stats_attr) {
		struct rtattr *tb[NHA_GROUP_STATS_ENTRY_MAX + 1];
		struct nh_grp_stats *nh_grp_stats;
		struct rtattr *rta;

		/* Defend against a malformed dump carrying more entries
		 * than the group has members: nh_grp_stats was sized for
		 * nh_groups_cnt elements only, so stop rather than write
		 * past the end of the array.
		 */
		if (i >= nhe->nh_groups_cnt)
			break;
		nh_grp_stats = &nhe->nh_grp_stats[i++];

		parse_rtattr_nested(tb, NHA_GROUP_STATS_ENTRY_MAX, pos);

		if (tb[NHA_GROUP_STATS_ENTRY_ID]) {
			rta = tb[NHA_GROUP_STATS_ENTRY_ID];
			nh_grp_stats->nh_id = rta_getattr_u32(rta);
		}

		if (tb[NHA_GROUP_STATS_ENTRY_PACKETS]) {
			rta = tb[NHA_GROUP_STATS_ENTRY_PACKETS];
			/* variable-width attribute, hence _uint */
			nh_grp_stats->packets = rta_getattr_uint(rta);
		}

		if (tb[NHA_GROUP_STATS_ENTRY_PACKETS_HW]) {
			rta = tb[NHA_GROUP_STATS_ENTRY_PACKETS_HW];
			nh_grp_stats->packets_hw = rta_getattr_uint(rta);
		}
	}
}
static void print_nh_res_group(const struct nha_res_grp *res_grp)
{
struct timeval tv;
@ -343,8 +383,38 @@ static void print_nh_res_bucket(FILE *fp, const struct rtattr *res_bucket_attr)
close_json_object();
}
static void print_nh_grp_stats(const struct nh_entry *nhe)
{
int i;
if (!show_stats)
return;
open_json_array(PRINT_JSON, "group_stats");
print_nl();
print_string(PRINT_FP, NULL, " stats:", NULL);
print_nl();
for (i = 0; i < nhe->nh_groups_cnt; i++) {
open_json_object(NULL);
print_uint(PRINT_ANY, "id", " id %u",
nhe->nh_grp_stats[i].nh_id);
print_u64(PRINT_ANY, "packets", " packets %llu",
nhe->nh_grp_stats[i].packets);
if (show_stats > 1)
print_u64(PRINT_ANY, "packets_hw", " packets_hw %llu",
nhe->nh_grp_stats[i].packets_hw);
if (i != nhe->nh_groups_cnt - 1)
print_nl();
close_json_object();
}
close_json_array(PRINT_JSON, NULL);
}
/* Release all heap-allocated members of @nhe; the entry itself is not
 * freed. Freed pointers are reset so a repeated call (or reuse of the
 * entry) cannot double-free or touch dangling memory.
 */
static void ipnh_destroy_entry(struct nh_entry *nhe)
{
	free(nhe->nh_grp_stats);
	nhe->nh_grp_stats = NULL;
	free(nhe->nh_encap);
	nhe->nh_encap = NULL;
	free(nhe->nh_groups);
	nhe->nh_groups = NULL;
}
@ -418,6 +488,25 @@ static int ipnh_parse_nhmsg(FILE *fp, const struct nhmsg *nhm, int len,
nhe->nh_has_res_grp = true;
}
if (tb[NHA_HW_STATS_ENABLE]) {
nhe->nh_hw_stats_supported = true;
nhe->nh_hw_stats_enabled =
!!rta_getattr_u32(tb[NHA_HW_STATS_ENABLE]);
}
if (tb[NHA_HW_STATS_USED])
nhe->nh_hw_stats_used = !!rta_getattr_u32(tb[NHA_HW_STATS_USED]);
if (tb[NHA_GROUP_STATS]) {
nhe->nh_grp_stats = calloc(nhe->nh_groups_cnt,
sizeof(*nhe->nh_grp_stats));
if (!nhe->nh_grp_stats) {
err = -ENOMEM;
goto out_err;
}
parse_nh_group_stats_rta(tb[NHA_GROUP_STATS], nhe);
}
nhe->nh_blackhole = !!tb[NHA_BLACKHOLE];
nhe->nh_fdb = !!tb[NHA_FDB];
@ -484,9 +573,34 @@ static void __print_nexthop_entry(FILE *fp, const char *jsobj,
if (nhe->nh_fdb)
print_null(PRINT_ANY, "fdb", "fdb", NULL);
if ((show_details > 0 || show_stats) && nhe->nh_hw_stats_supported) {
open_json_object("hw_stats");
print_on_off(PRINT_ANY, "enabled", "hw_stats %s ",
nhe->nh_hw_stats_enabled);
print_on_off(PRINT_ANY, "used", "used %s ",
nhe->nh_hw_stats_used);
close_json_object();
}
if (nhe->nh_grp_stats)
print_nh_grp_stats(nhe);
close_json_object();
}
/* Translate the -s verbosity level into NHA_OP_FLAGS bits for dump and
 * get requests: one -s requests SW datapath stats, -s -s additionally
 * requests HW datapath stats.
 */
static __u32 ipnh_get_op_flags(void)
{
	__u32 op_flags;

	if (!show_stats)
		return 0;

	op_flags = NHA_OP_FLAG_DUMP_STATS;
	if (show_stats > 1)
		op_flags |= NHA_OP_FLAG_DUMP_HW_STATS;

	return op_flags;
}
static int __ipnh_get_id(struct rtnl_handle *rthp, __u32 nh_id,
struct nlmsghdr **answer)
{
@ -500,8 +614,10 @@ static int __ipnh_get_id(struct rtnl_handle *rthp, __u32 nh_id,
.n.nlmsg_type = RTM_GETNEXTHOP,
.nhm.nh_family = preferred_family,
};
__u32 op_flags = ipnh_get_op_flags();
addattr32(&req.n, sizeof(req), NHA_ID, nh_id);
addattr32(&req.n, sizeof(req), NHA_OP_FLAGS, op_flags);
return rtnl_talk(rthp, &req.n, answer);
}
@ -987,6 +1103,17 @@ static int ipnh_modify(int cmd, unsigned int flags, int argc, char **argv)
if (rtnl_rtprot_a2n(&prot, *argv))
invarg("\"protocol\" value is invalid\n", *argv);
req.nhm.nh_protocol = prot;
} else if (!strcmp(*argv, "hw_stats")) {
bool hw_stats;
int ret;
NEXT_ARG();
hw_stats = parse_on_off("hw_stats", *argv, &ret);
if (ret)
return ret;
addattr32(&req.n, sizeof(req), NHA_HW_STATS_ENABLE,
hw_stats);
} else if (strcmp(*argv, "help") == 0) {
usage();
} else {
@ -1093,6 +1220,8 @@ static int ipnh_list_flush(int argc, char **argv, int action)
argc--; argv++;
}
filter.op_flags = ipnh_get_op_flags();
if (action == IPNH_FLUSH)
return ipnh_flush(all);

View File

@ -13,6 +13,12 @@ struct nha_res_grp {
__u64 unbalanced_time;
};
/* Per-member statistics of a nexthop group, filled from the kernel's
 * NHA_GROUP_STATS_ENTRY netlink attributes.
 */
struct nh_grp_stats {
	__u32 nh_id;		/* ID of the member nexthop */
	__u64 packets;		/* total packets forwarded via this member */
	__u64 packets_hw;	/* portion counted in the HW datapath */
};
struct nh_entry {
struct hlist_node nh_hash;
@ -27,6 +33,10 @@ struct nh_entry {
bool nh_blackhole;
bool nh_fdb;
bool nh_hw_stats_supported;
bool nh_hw_stats_enabled;
bool nh_hw_stats_used;
int nh_gateway_len;
union {
__be32 ipv4;
@ -44,6 +54,7 @@ struct nh_entry {
int nh_groups_cnt;
struct nexthop_grp *nh_groups;
struct nh_grp_stats *nh_grp_stats;
};
void print_cache_nexthop_id(FILE *fp, const char *fp_prefix, const char *jsobj,

View File

@ -68,6 +68,8 @@ ip-nexthop \- nexthop object management
.BR fdb " ] | "
.B group
.IR GROUP " [ "
.BR hw_stats " { "
.BR on " | " off " } ] [ "
.BR fdb " ] [ "
.B type
.IR TYPE " [ " TYPE_ARGS " ] ] }"