
ipmr/ip6mr: allow to get unresolved cache via netlink

/proc/net/ip[6]_mr_cache allows getting all mfc entries, even those sitting in
the unresolved list (mfc[6]_unres_queue), but only for the table
RT_TABLE_DEFAULT.
This patch adds parsing of the unresolved list when the dump is made via
rtnetlink, so that every table can be checked.

In IPv6, we set rtm_type in ip6mr_fill_mroute(), because in the case of an
unresolved mfc entry, __ip6mr_fill_mroute() will not set it. In IPv4, this is
already done.

Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Nicolas Dichtel 2012-12-04 01:13:39 +00:00 committed by David S. Miller
parent 9a68ac72a4
commit 1eb99af52c
2 changed files with 41 additions and 2 deletions
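With the unresolved queue included in the rtnetlink dump, every multicast table can be inspected from userspace with a plain NETLINK_ROUTE socket. Below is a minimal sketch, not part of this commit, assuming the RTNL_FAMILY_IPMR dump family (RTNL_FAMILY_IP6MR for IPv6); each RTM_NEWROUTE reply carries RTA_TABLE, which lets the caller tell the tables apart:

/* Minimal sketch (not from this commit): request a dump of all mfc
 * entries, resolved and unresolved, over NETLINK_ROUTE.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int mfc_dump_request(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = RTNL_FAMILY_IPMR;	/* IPv4 multicast FIB */

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* caller recv()s RTM_NEWROUTE replies, then close()s */
}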

net/ipv4/ipmr.c

@@ -2154,6 +2154,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 {
 	struct nlmsghdr *nlh;
 	struct rtmsg *rtm;
+	int err;
 
 	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
 	if (nlh == NULL)
@@ -2178,7 +2179,9 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 	if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
 	    nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
 		goto nla_put_failure;
-	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
+	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
+	/* do not break the dump if cache is unresolved */
+	if (err < 0 && err != -ENOENT)
 		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
@@ -2221,6 +2224,22 @@ next_entry:
 			}
 			e = s_e = 0;
 		}
+		spin_lock_bh(&mfc_unres_lock);
+		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
+			if (e < s_e)
+				goto next_entry2;
+			if (ipmr_fill_mroute(mrt, skb,
+					     NETLINK_CB(cb->skb).portid,
+					     cb->nlh->nlmsg_seq,
+					     mfc) < 0) {
+				spin_unlock_bh(&mfc_unres_lock);
+				goto done;
+			}
+next_entry2:
+			e++;
+		}
+		spin_unlock_bh(&mfc_unres_lock);
+		e = s_e = 0;
+		s_h = 0;
 next_table:
 		t++;
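For context (abridged from the ipmr.c of this period, not part of the diff): the err != -ENOENT test above relies on __ipmr_fill_mroute() giving up early on an entry whose parent vif is not resolved yet, so only the header and RTA_SRC/RTA_DST written by ipmr_fill_mroute() end up in the message:

	/* Inside __ipmr_fill_mroute(): an unresolved cache entry has no
	 * valid parent vif, so there is no IIF/OIF to report.
	 */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;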

net/ipv6/ip6mr.c

@@ -2235,6 +2235,7 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 {
 	struct nlmsghdr *nlh;
 	struct rtmsg *rtm;
+	int err;
 
 	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
 	if (nlh == NULL)
@@ -2248,6 +2249,7 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	rtm->rtm_table = mrt->id;
 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
 		goto nla_put_failure;
+	rtm->rtm_type = RTN_MULTICAST;
 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
 	if (c->mfc_flags & MFC_STATIC)
 		rtm->rtm_protocol = RTPROT_STATIC;
@@ -2258,7 +2260,9 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
 	    nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
 		goto nla_put_failure;
-	if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
+	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
+	/* do not break the dump if cache is unresolved */
+	if (err < 0 && err != -ENOENT)
 		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
@@ -2301,6 +2305,22 @@ next_entry:
 			}
 			e = s_e = 0;
 		}
+		spin_lock_bh(&mfc_unres_lock);
+		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
+			if (e < s_e)
+				goto next_entry2;
+			if (ip6mr_fill_mroute(mrt, skb,
+					      NETLINK_CB(cb->skb).portid,
+					      cb->nlh->nlmsg_seq,
+					      mfc) < 0) {
+				spin_unlock_bh(&mfc_unres_lock);
+				goto done;
+			}
+next_entry2:
+			e++;
+		}
+		spin_unlock_bh(&mfc_unres_lock);
+		e = s_e = 0;
+		s_h = 0;
 next_table:
 		t++;
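A visible consequence for dump consumers: unresolved entries now appear in the dump but carry none of the forwarding attributes, since __ip[6]mr_fill_mroute() bailed out before adding them. A sketch of how a reader might tell the two kinds of entry apart (mfc_is_unresolved() is a hypothetical helper, not kernel code, and assumes resolved entries always report RTA_IIF):

#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Hypothetical helper: treat a dumped mfc entry as unresolved when it
 * lacks RTA_IIF, an attribute __ip[6]mr_fill_mroute() adds only for
 * resolved entries.
 */
static int mfc_is_unresolved(struct nlmsghdr *nlh)
{
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct rtattr *rta = RTM_RTA(rtm);
	int len = RTM_PAYLOAD(nlh);

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len))
		if (rta->rta_type == RTA_IIF)
			return 0;	/* resolved: input interface reported */
	return 1;
}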