
Merge branch 'fib_trie-next'

Alexander Duyck says:

====================
ipv4/fib_trie: Cleanups to prepare for introduction of key vector

This patch series is meant to mostly just clean up the fib_trie to prepare
it for the introduction of the key_vector.  As such there are a number of
minor clean-ups such as reformatting the tnode to match the format once the
key vector is introduced, some optimizations to drop the need for a leaf
parent pointer, and some changes to remove duplication of effort such as
the 2 look-ups that were essentially being done per node insertion.

v2: Added code to cleanup idx >> n->bits and explain unsigned long logic
    Added code to prevent allocation when tnode size is larger than size_t
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller <davem@davemloft.net> committed on 2015-03-04 23:35:24 -05:00
commit f93eb4ba0f
4 changed files with 448 additions and 383 deletions
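The second v2 note above refers to guarding tnode allocation against size_t overflow. Below is a minimal userspace sketch of that kind of guard; the names (alloc_table, struct slot) are invented for illustration, and the real check lives in fib_trie.c, whose diff is suppressed at the end of this page.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct slot { void *ptr; };			/* stand-in for a child pointer */

/* Allocate a node with 2^bits child slots, refusing sizes that would
 * overflow size_t (illustrative only, not the kernel implementation). */
static void *alloc_table(size_t header, unsigned int bits)
{
	size_t n, payload;

	/* shifting by the width of size_t or more is undefined behavior */
	if (bits >= sizeof(size_t) * 8)
		return NULL;

	n = (size_t)1 << bits;
	/* reject the request if header + n * sizeof(struct slot) would wrap */
	if (n > (SIZE_MAX - header) / sizeof(struct slot))
		return NULL;

	payload = n * sizeof(struct slot);
	return calloc(1, header + payload);
}

int main(void)
{
	void *ok = alloc_table(64, 4);		/* 16 slots: fine */
	void *bad = alloc_table(64, 63);	/* would overflow: returns NULL */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}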

include/net/ip_fib.h

@@ -185,6 +185,7 @@ struct fib_table {
 	u32			tb_id;
 	int			tb_default;
 	int			tb_num_default;
+	struct rcu_head		rcu;
 	unsigned long		tb_data[0];
 };
@@ -206,12 +207,16 @@ void fib_free_table(struct fib_table *tb);
 static inline struct fib_table *fib_get_table(struct net *net, u32 id)
 {
+	struct hlist_node *tb_hlist;
 	struct hlist_head *ptr;
 
 	ptr = id == RT_TABLE_LOCAL ?
 		&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] :
 		&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX];
-	return hlist_entry(ptr->first, struct fib_table, tb_hlist);
+
+	tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr));
+
+	return hlist_entry(tb_hlist, struct fib_table, tb_hlist);
 }
 
 static inline struct fib_table *fib_new_table(struct net *net, u32 id)
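The fib_get_table() hunk above changes only how the first list node is loaded (through hlist_first_rcu() and rcu_dereference_rtnl() instead of a plain ptr->first); hlist_entry() still converts that node pointer back into the enclosing struct fib_table. The conversion is plain container_of() arithmetic, shown here as a standalone userspace sketch with invented names (container_of_demo, struct table):

#include <stddef.h>
#include <stdio.h>

/* the same pointer arithmetic hlist_entry()/container_of() performs */
#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node { struct node *next; };		/* stand-in for hlist_node */

struct table {
	struct node	hook;			/* like fib_table.tb_hlist */
	unsigned int	id;
};

int main(void)
{
	struct table t = { .id = 255 };		/* 255 == RT_TABLE_LOCAL */
	struct node *first = &t.hook;		/* what a list head would hold */
	struct table *tb = container_of_demo(first, struct table, hook);

	printf("recovered table id %u\n", tb->id);
	return 0;
}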
@@ -222,15 +227,19 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
 static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
 			     struct fib_result *res)
 {
-	int err = -ENETUNREACH;
+	struct fib_table *tb;
+	int err;
 
 	rcu_read_lock();
 
-	if (!fib_table_lookup(fib_get_table(net, RT_TABLE_LOCAL), flp, res,
-			      FIB_LOOKUP_NOREF) ||
-	    !fib_table_lookup(fib_get_table(net, RT_TABLE_MAIN), flp, res,
-			      FIB_LOOKUP_NOREF))
-		err = 0;
+	for (err = 0; !err; err = -ENETUNREACH) {
+		tb = fib_get_table(net, RT_TABLE_LOCAL);
+		if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+			break;
+
+		tb = fib_get_table(net, RT_TABLE_MAIN);
+		if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+			break;
+	}
 
 	rcu_read_unlock();
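The loop introduced above is a small idiom: its body runs exactly once, a successful lookup breaks out while err is still 0, and only when every table misses does the update clause set err to -ENETUNREACH before the !err test ends the loop. A self-contained sketch of just that control flow, with toy lookup functions and ENETUNREACH written out numerically so it builds outside the kernel:

#include <stdio.h>

#define ENETUNREACH_DEMO 101	/* numeric value of ENETUNREACH on Linux */

/* toy stand-ins: a lookup "hits" when it returns 0 */
static int lookup_local(void) { return -1; }	/* miss */
static int lookup_main(void)  { return  0; }	/* hit  */

int main(void)
{
	int err;

	for (err = 0; !err; err = -ENETUNREACH_DEMO) {
		if (!lookup_local())
			break;			/* hit: leave with err == 0 */
		if (!lookup_main())
			break;			/* hit: leave with err == 0 */
		/* falling out of the body lets the update clause set the
		 * error; !err is then false and the loop terminates */
	}

	printf("err = %d\n", err);		/* prints 0: lookup_main() hit */
	return 0;
}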
@@ -249,28 +258,33 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
 static inline int fib_lookup(struct net *net, struct flowi4 *flp,
 			     struct fib_result *res)
 {
-	if (!net->ipv4.fib_has_custom_rules) {
-		int err = -ENETUNREACH;
-
-		rcu_read_lock();
-
-		res->tclassid = 0;
-
-		if ((net->ipv4.fib_local &&
-		     !fib_table_lookup(net->ipv4.fib_local, flp, res,
-				       FIB_LOOKUP_NOREF)) ||
-		    (net->ipv4.fib_main &&
-		     !fib_table_lookup(net->ipv4.fib_main, flp, res,
-				       FIB_LOOKUP_NOREF)) ||
-		    (net->ipv4.fib_default &&
-		     !fib_table_lookup(net->ipv4.fib_default, flp, res,
-				       FIB_LOOKUP_NOREF)))
-			err = 0;
-
-		rcu_read_unlock();
-
-		return err;
-	}
-	return __fib_lookup(net, flp, res);
+	struct fib_table *tb;
+	int err;
+
+	if (net->ipv4.fib_has_custom_rules)
+		return __fib_lookup(net, flp, res);
+
+	rcu_read_lock();
+
+	res->tclassid = 0;
+
+	for (err = 0; !err; err = -ENETUNREACH) {
+		tb = rcu_dereference_rtnl(net->ipv4.fib_local);
+		if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+			break;
+
+		tb = rcu_dereference_rtnl(net->ipv4.fib_main);
+		if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+			break;
+
+		tb = rcu_dereference_rtnl(net->ipv4.fib_default);
+		if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF))
+			break;
+	}
+
+	rcu_read_unlock();
+
+	return err;
 }
 
 #endif /* CONFIG_IP_MULTIPLE_TABLES */

include/net/netns/ipv4.h

@@ -7,6 +7,7 @@
 #include <linux/uidgid.h>
 #include <net/inet_frag.h>
+#include <linux/rcupdate.h>
 
 struct tcpm_hash_bucket;
 struct ctl_table_header;
@@ -38,9 +39,9 @@ struct netns_ipv4 {
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	struct fib_rules_ops	*rules_ops;
 	bool			fib_has_custom_rules;
-	struct fib_table	*fib_local;
-	struct fib_table	*fib_main;
-	struct fib_table	*fib_default;
+	struct fib_table __rcu	*fib_local;
+	struct fib_table __rcu	*fib_main;
+	struct fib_table __rcu	*fib_default;
 #endif
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	int			fib_num_tclassid_users;
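The __rcu annotation added above is checked by sparse; it records that fib_local, fib_main and fib_default must be published with rcu_assign_pointer() (as fib_new_table() now does in the next file) and read via rcu_dereference*() inside a read-side critical section. The ordering contract behind that pairing can be sketched in userspace with a C11 release store and acquire load; this is only an analogue for illustration, with invented names, not the kernel's RCU implementation:

#include <stdatomic.h>
#include <stdio.h>

struct fib_table_demo { unsigned int id; };

/* the shared pointer, standing in for net->ipv4.fib_main */
static _Atomic(struct fib_table_demo *) fib_main_demo;

/* writer: all initialisation of *tb is ordered before the pointer store,
 * which is the guarantee rcu_assign_pointer() provides */
static void publish(struct fib_table_demo *tb)
{
	atomic_store_explicit(&fib_main_demo, tb, memory_order_release);
}

/* reader: the acquire load pairs with the release store, playing the role
 * rcu_dereference() plays on the kernel's read side */
static struct fib_table_demo *subscribe(void)
{
	return atomic_load_explicit(&fib_main_demo, memory_order_acquire);
}

int main(void)
{
	static struct fib_table_demo main_tb = { .id = 254 };	/* RT_TABLE_MAIN */
	struct fib_table_demo *tb;

	publish(&main_tb);
	tb = subscribe();
	if (tb)
		printf("saw table %u\n", tb->id);
	return 0;
}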

net/ipv4/fib_frontend.c

@@ -89,17 +89,14 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
 	switch (id) {
 	case RT_TABLE_LOCAL:
-		net->ipv4.fib_local = tb;
+		rcu_assign_pointer(net->ipv4.fib_local, tb);
 		break;
 	case RT_TABLE_MAIN:
-		net->ipv4.fib_main = tb;
+		rcu_assign_pointer(net->ipv4.fib_main, tb);
 		break;
 	case RT_TABLE_DEFAULT:
-		net->ipv4.fib_default = tb;
+		rcu_assign_pointer(net->ipv4.fib_default, tb);
 		break;
 	default:
 		break;
 	}
@@ -132,13 +129,14 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
 static void fib_flush(struct net *net)
 {
 	int flushed = 0;
-	struct fib_table *tb;
-	struct hlist_head *head;
 	unsigned int h;
 
 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
-		head = &net->ipv4.fib_table_hash[h];
-		hlist_for_each_entry(tb, head, tb_hlist)
+		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
+		struct hlist_node *tmp;
+		struct fib_table *tb;
+
+		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
 			flushed += fib_table_flush(tb);
 	}
@@ -665,10 +663,12 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	s_h = cb->args[0];
 	s_e = cb->args[1];
 
+	rcu_read_lock();
+
 	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
 		e = 0;
 		head = &net->ipv4.fib_table_hash[h];
-		hlist_for_each_entry(tb, head, tb_hlist) {
+		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
 			if (e < s_e)
 				goto next;
 			if (dumped)
@@ -682,6 +682,8 @@ next:
 		}
 	}
 out:
+	rcu_read_unlock();
+
 	cb->args[1] = e;
 	cb->args[0] = h;
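Aside from taking the RCU read lock, the dump path keeps its resumable cursor: cb->args[0] and cb->args[1] record the hash bucket and entry index where the previous netlink dump call stopped, and the (h, s_e = 0) loop header together with the e < s_e skip restarts from that point. A self-contained sketch of that cursor logic, with toy data, invented names, and no netlink:

#include <stdio.h>

#define HASHSZ		4
#define PER_BUCKET	3

static const int buckets[HASHSZ][PER_BUCKET] = {
	{ 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 }, { 10, 11, 12 },
};

/* Dump at most 'budget' items, resuming from the saved (*s_h, *s_e) cursor
 * and saving the new position before returning. */
static int dump(int *s_h, int *s_e, int budget)
{
	int dumped = 0;
	int h, e = 0;

	for (h = *s_h; h < HASHSZ; h++, *s_e = 0) {
		for (e = 0; e < PER_BUCKET; e++) {
			if (e < *s_e)
				continue;	/* already sent in an earlier call */
			if (dumped == budget)
				goto out;	/* buffer "full": remember position */
			printf("item %d\n", buckets[h][e]);
			dumped++;
		}
	}
out:
	*s_h = h;
	*s_e = e;
	return dumped;
}

int main(void)
{
	int s_h = 0, s_e = 0;

	while (dump(&s_h, &s_e, 5) > 0)	/* three calls dump 5 + 5 + 2 items */
		printf("-- resumed --\n");
	return 0;
}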
@@ -1117,14 +1119,34 @@ static void ip_fib_net_exit(struct net *net)
 	rtnl_lock();
 	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
-		struct fib_table *tb;
-		struct hlist_head *head;
+		struct hlist_head *head = &net->ipv4.fib_table_hash[i];
 		struct hlist_node *tmp;
+		struct fib_table *tb;
 
-		head = &net->ipv4.fib_table_hash[i];
-		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
-			hlist_del(&tb->tb_hlist);
+		/* this is done in two passes as flushing the table could
+		 * cause it to be reallocated in order to accommodate new
+		 * tnodes at the root as the table shrinks.
+		 */
+		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
 			fib_table_flush(tb);
+
+		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+			switch (tb->tb_id) {
+			case RT_TABLE_LOCAL:
+				RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
+				break;
+			case RT_TABLE_MAIN:
+				RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
+				break;
+			case RT_TABLE_DEFAULT:
+				RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
+				break;
+			default:
+				break;
+			}
+#endif
+			hlist_del(&tb->tb_hlist);
 			fib_free_table(tb);
 		}
 	}
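The comment in the hunk above explains the split: the first pass flushes every table while it is still linked, because flushing can reallocate the table as the root tnode shrinks, and only the second pass unlinks and frees. The same two-pass shape in a userspace sketch with an ordinary singly linked list (invented names; the kernel's hlist_for_each_entry_safe() keeps the equivalent of tmp for the same reason):

#include <stdio.h>
#include <stdlib.h>

struct table {
	struct table	*next;
	int		id;
	int		entries;
};

/* pass 1 helper: empty the table but leave it allocated and linked */
static int table_flush(struct table *tb)
{
	int flushed = tb->entries;

	tb->entries = 0;
	return flushed;
}

int main(void)
{
	struct table *head = NULL, *tb, *tmp;
	int id;

	/* build three dummy tables */
	for (id = 0; id < 3; id++) {
		tb = calloc(1, sizeof(*tb));
		tb->next = head;
		tb->id = id;
		tb->entries = id + 1;
		head = tb;
	}

	/* pass 1: flush everything while the list stays intact */
	for (tb = head; tb; tb = tb->next)
		printf("flushed %d entries from table %d\n",
		       table_flush(tb), tb->id);

	/* pass 2: unlink and free; grab ->next before freeing the node,
	 * which is exactly what the _safe list iterators do for you */
	for (tb = head; tb; tb = tmp) {
		tmp = tb->next;
		free(tb);
	}
	head = NULL;
	return 0;
}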

net/ipv4/fib_trie.c: file diff suppressed because it is too large.