
ipv4: add __rcu annotations to routes.c

Add __rcu annotations to:
        (struct dst_entry)->rt_next
        (struct rt_hash_bucket)->chain

and use the appropriate RCU primitives to reduce sparse warnings when
CONFIG_SPARSE_RCU_POINTER=y.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Eric Dumazet <eric.dumazet@gmail.com>
Date:      2010-10-25 21:02:07 +00:00
Committer: David S. Miller <davem@davemloft.net>
Commit:    1c31720a74
Parent:    c1b60092cf

2 changed files with 47 additions and 30 deletions
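The recurring pattern in this patch is the standard one for sparse-checked
RCU pointers. Below is a minimal sketch of it; the struct and the names
(item, chain, chain_lock, find, insert) are hypothetical, not taken from
the patch. Once a pointer is marked __rcu, sparse flags every plain access:
lockless readers go through rcu_dereference() under rcu_read_lock(), while
updaters holding the serializing lock use rcu_dereference_protected(),
whose second argument states (and lets lockdep verify) why the unguarded
access is safe.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item {
	int key;
	struct item __rcu *next;	/* sparse now checks every access */
};

static struct item __rcu *chain;	/* hypothetical list head */
static DEFINE_SPINLOCK(chain_lock);	/* serializes updaters */

/* Reader side: caller must hold rcu_read_lock(). */
static struct item *find(int key)
{
	struct item *p;

	for (p = rcu_dereference(chain); p; p = rcu_dereference(p->next))
		if (p->key == key)
			return p;
	return NULL;
}

/* Update side: chain_lock is held, so rcu_dereference_protected() is
 * legal and skips the read-side barrier/volatile access. */
static void insert(struct item *n)
{
	spin_lock(&chain_lock);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(chain,
					lockdep_is_held(&chain_lock)));
	rcu_assign_pointer(chain, n);
	spin_unlock(&chain_lock);
}

Two looser forms also appear in the hunks below: rcu_dereference_raw(),
which claims no protection at all and suits heuristic NULL checks where a
stale value is harmless, and rcu_dereference_protected(ptr, 1), which
asserts update-side exclusion without a condition lockdep can check.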

--- a/include/net/dst.h
+++ b/include/net/dst.h

@@ -95,7 +95,7 @@ struct dst_entry {
 	unsigned long lastuse;
 	union {
 		struct dst_entry *next;
-		struct rtable *rt_next;
+		struct rtable __rcu *rt_next;
 		struct rt6_info *rt6_next;
 		struct dn_route *dn_next;
 	};

--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c

@@ -198,7 +198,7 @@ const __u8 ip_tos2prio[16] = {
  */

 struct rt_hash_bucket {
-	struct rtable *chain;
+	struct rtable __rcu *chain;
 };

 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
@@ -280,7 +280,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
 	struct rtable *r = NULL;

 	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
-		if (!rt_hash_table[st->bucket].chain)
+		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
 			continue;
 		rcu_read_lock_bh();
 		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -300,17 +300,17 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 {
 	struct rt_cache_iter_state *st = seq->private;

-	r = r->dst.rt_next;
+	r = rcu_dereference_bh(r->dst.rt_next);
 	while (!r) {
 		rcu_read_unlock_bh();
 		do {
 			if (--st->bucket < 0)
 				return NULL;
-		} while (!rt_hash_table[st->bucket].chain);
+		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
 		rcu_read_lock_bh();
-		r = rt_hash_table[st->bucket].chain;
+		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 	}
-	return rcu_dereference_bh(r);
+	return r;
 }

 static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -721,19 +721,23 @@ static void rt_do_flush(int process_context)
 	for (i = 0; i <= rt_hash_mask; i++) {
 		if (process_context && need_resched())
 			cond_resched();
-		rth = rt_hash_table[i].chain;
+		rth = rcu_dereference_raw(rt_hash_table[i].chain);
 		if (!rth)
 			continue;

 		spin_lock_bh(rt_hash_lock_addr(i));
 #ifdef CONFIG_NET_NS
 		{
-		struct rtable ** prev, * p;
+		struct rtable __rcu **prev;
+		struct rtable *p;

-		rth = rt_hash_table[i].chain;
+		rth = rcu_dereference_protected(rt_hash_table[i].chain,
+			lockdep_is_held(rt_hash_lock_addr(i)));

 		/* defer releasing the head of the list after spin_unlock */
-		for (tail = rth; tail; tail = tail->dst.rt_next)
+		for (tail = rth; tail;
+		     tail = rcu_dereference_protected(tail->dst.rt_next,
+				lockdep_is_held(rt_hash_lock_addr(i))))
 			if (!rt_is_expired(tail))
 				break;
 		if (rth != tail)
@@ -741,8 +745,12 @@ static void rt_do_flush(int process_context)

 		/* call rt_free on entries after the tail requiring flush */
 		prev = &rt_hash_table[i].chain;
-		for (p = *prev; p; p = next) {
-			next = p->dst.rt_next;
+		for (p = rcu_dereference_protected(*prev,
+				lockdep_is_held(rt_hash_lock_addr(i)));
+		     p != NULL;
+		     p = next) {
+			next = rcu_dereference_protected(p->dst.rt_next,
+				lockdep_is_held(rt_hash_lock_addr(i)));
 			if (!rt_is_expired(p)) {
 				prev = &p->dst.rt_next;
 			} else {
@@ -752,14 +760,15 @@ static void rt_do_flush(int process_context)
 		}
 		}
 #else
-		rth = rt_hash_table[i].chain;
-		rt_hash_table[i].chain = NULL;
+		rth = rcu_dereference_protected(rt_hash_table[i].chain,
+			lockdep_is_held(rt_hash_lock_addr(i)));
+		rcu_assign_pointer(rt_hash_table[i].chain, NULL);
 		tail = NULL;
 #endif
 		spin_unlock_bh(rt_hash_lock_addr(i));

 		for (; rth != tail; rth = next) {
-			next = rth->dst.rt_next;
+			next = rcu_dereference_protected(rth->dst.rt_next, 1);
 			rt_free(rth);
 		}
 	}
@@ -790,7 +799,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
 	while (aux != rth) {
 		if (compare_hash_inputs(&aux->fl, &rth->fl))
 			return 0;
-		aux = aux->dst.rt_next;
+		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
 	}
 	return ONE;
 }
@@ -799,7 +808,8 @@ static void rt_check_expire(void)
 {
 	static unsigned int rover;
 	unsigned int i = rover, goal;
-	struct rtable *rth, **rthp;
+	struct rtable *rth;
+	struct rtable __rcu **rthp;
 	unsigned long samples = 0;
 	unsigned long sum = 0, sum2 = 0;
 	unsigned long delta;
@@ -825,11 +835,12 @@ static void rt_check_expire(void)

 		samples++;

-		if (*rthp == NULL)
+		if (rcu_dereference_raw(*rthp) == NULL)
 			continue;
 		length = 0;
 		spin_lock_bh(rt_hash_lock_addr(i));
-		while ((rth = *rthp) != NULL) {
+		while ((rth = rcu_dereference_protected(*rthp,
+					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
 			prefetch(rth->dst.rt_next);
 			if (rt_is_expired(rth)) {
 				*rthp = rth->dst.rt_next;
@@ -941,7 +952,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
 	static unsigned long last_gc;
 	static int rover;
 	static int equilibrium;
-	struct rtable *rth, **rthp;
+	struct rtable *rth;
+	struct rtable __rcu **rthp;
 	unsigned long now = jiffies;
 	int goal;
 	int entries = dst_entries_get_fast(&ipv4_dst_ops);
@@ -995,7 +1007,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
 			k = (k + 1) & rt_hash_mask;
 			rthp = &rt_hash_table[k].chain;
 			spin_lock_bh(rt_hash_lock_addr(k));
-			while ((rth = *rthp) != NULL) {
+			while ((rth = rcu_dereference_protected(*rthp,
+					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
 				if (!rt_is_expired(rth) &&
 				    !rt_may_expire(rth, tmo, expire)) {
 					tmo >>= 1;
@@ -1071,7 +1084,7 @@ static int slow_chain_length(const struct rtable *head)

 	while (rth) {
 		length += has_noalias(head, rth);
-		rth = rth->dst.rt_next;
+		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
 	}
 	return length >> FRACT_BITS;
 }
@@ -1079,9 +1092,9 @@ static int slow_chain_length(const struct rtable *head)
 static int rt_intern_hash(unsigned hash, struct rtable *rt,
 			  struct rtable **rp, struct sk_buff *skb, int ifindex)
 {
-	struct rtable *rth, **rthp;
+	struct rtable *rth, *cand;
+	struct rtable __rcu **rthp, **candp;
 	unsigned long now;
-	struct rtable *cand, **candp;
 	u32 min_score;
 	int chain_length;
 	int attempts = !in_softirq();
@@ -1128,7 +1141,8 @@ restart:
 	rthp = &rt_hash_table[hash].chain;

 	spin_lock_bh(rt_hash_lock_addr(hash));
-	while ((rth = *rthp) != NULL) {
+	while ((rth = rcu_dereference_protected(*rthp,
+			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
 		if (rt_is_expired(rth)) {
 			*rthp = rth->dst.rt_next;
 			rt_free(rth);
@@ -1324,12 +1338,14 @@ EXPORT_SYMBOL(__ip_select_ident);

 static void rt_del(unsigned hash, struct rtable *rt)
 {
-	struct rtable **rthp, *aux;
+	struct rtable __rcu **rthp;
+	struct rtable *aux;

 	rthp = &rt_hash_table[hash].chain;
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	ip_rt_put(rt);
-	while ((aux = *rthp) != NULL) {
+	while ((aux = rcu_dereference_protected(*rthp,
+			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
 		if (aux == rt || rt_is_expired(aux)) {
 			*rthp = aux->dst.rt_next;
 			rt_free(aux);
@@ -1346,7 +1362,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 {
 	int i, k;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
-	struct rtable *rth, **rthp;
+	struct rtable *rth;
+	struct rtable __rcu **rthp;
 	__be32 skeys[2] = { saddr, 0 };
 	int ikeys[2] = { dev->ifindex, 0 };
 	struct netevent_redirect netevent;
@@ -1379,7 +1396,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
 						rt_genid(net));

-			rthp=&rt_hash_table[hash].chain;
+			rthp = &rt_hash_table[hash].chain;

 			while ((rth = rcu_dereference(*rthp)) != NULL) {
 				struct rtable *rt;