Mirror of https://github.com/edk2-porting/linux-next.git
Commit 7a9bc9b81a
If rpfilter is off (or the SKB has an IPSEC path) and there are no tclassid users, we don't have to do anything at all when fib_validate_source() is invoked besides setting the itag to zero.

We monitor tclassid uses with a counter (modified only under RTNL and marked __read_mostly), and we protect the real work of fib_validate_source() with a test against this counter and against whether rpfilter is to be done.

Having a way to know whether tclassid processing is needed also opens the door for future optimized rpfilter algorithms that do not perform full FIB lookups.

Signed-off-by: David S. Miller <davem@davemloft.net>
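The optimization described above boils down to a cheap guard in front of the expensive lookup path. A minimal sketch of that pattern follows; the fib_num_tclassid_users counter name, the __fib_validate_source() helper, and the exact function signature are illustrative and may not match the tree.

/* Counter of tclassid users; modified only under RTNL, read on the
 * fast path, hence __read_mostly (name used here for illustration).
 */
extern int fib_num_tclassid_users __read_mostly;

int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
			u8 tos, int oif, struct net_device *dev,
			struct in_device *idev, u32 *itag)
{
	/* rpfilter is skipped when the skb carries an IPsec path. */
	int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);

	if (!r && !fib_num_tclassid_users) {
		/* No rpfilter and no tclassid users: nothing to look up,
		 * just clear the tag.
		 */
		*itag = 0;
		return 0;
	}
	/* Otherwise fall back to the full FIB-based validation
	 * (hypothetical helper name).
	 */
	return __fib_validate_source(skb, src, dst, tos, oif, dev, idev, itag);
}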
121 lines
3.0 KiB
C
#ifndef __NET_FIB_RULES_H
#define __NET_FIB_RULES_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/fib_rules.h>
#include <net/flow.h>
#include <net/rtnetlink.h>

struct fib_rule {
	struct list_head	list;
	atomic_t		refcnt;
	int			iifindex;
	int			oifindex;
	u32			mark;
	u32			mark_mask;
	u32			pref;
	u32			flags;
	u32			table;
	u8			action;
	u32			target;
	struct fib_rule __rcu	*ctarget;
	char			iifname[IFNAMSIZ];
	char			oifname[IFNAMSIZ];
	struct rcu_head		rcu;
	struct net		*fr_net;
};

struct fib_lookup_arg {
	void			*lookup_ptr;
	void			*result;
	struct fib_rule		*rule;
	int			flags;
#define FIB_LOOKUP_NOREF	1
};

struct fib_rules_ops {
	int			family;
	struct list_head	list;
	int			rule_size;
	int			addr_size;
	int			unresolved_rules;
	int			nr_goto_rules;

	int			(*action)(struct fib_rule *,
					  struct flowi *, int,
					  struct fib_lookup_arg *);
	int			(*match)(struct fib_rule *,
					 struct flowi *, int);
	int			(*configure)(struct fib_rule *,
					     struct sk_buff *,
					     struct fib_rule_hdr *,
					     struct nlattr **);
	void			(*delete)(struct fib_rule *);
	int			(*compare)(struct fib_rule *,
					   struct fib_rule_hdr *,
					   struct nlattr **);
	int			(*fill)(struct fib_rule *, struct sk_buff *,
					struct fib_rule_hdr *);
	u32			(*default_pref)(struct fib_rules_ops *ops);
	size_t			(*nlmsg_payload)(struct fib_rule *);

	/* Called after modifications to the rules set, must flush
	 * the route cache if one exists. */
	void			(*flush_cache)(struct fib_rules_ops *ops);

	int			nlgroup;
	const struct nla_policy	*policy;
	struct list_head	rules_list;
	struct module		*owner;
	struct net		*fro_net;
	struct rcu_head		rcu;
};

#define FRA_GENERIC_POLICY \
	[FRA_IIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
	[FRA_OIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
	[FRA_PRIORITY]	= { .type = NLA_U32 }, \
	[FRA_FWMARK]	= { .type = NLA_U32 }, \
	[FRA_FWMASK]	= { .type = NLA_U32 }, \
	[FRA_TABLE]	= { .type = NLA_U32 }, \
	[FRA_GOTO]	= { .type = NLA_U32 }
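
/* Sketch: a per-family netlink attribute policy is typically seeded with
 * FRA_GENERIC_POLICY and extended with family-specific attributes.  The
 * example_rule_policy name is hypothetical; the FRA_FLOW entry mirrors
 * the IPv4 rules code (FRA_FLOW carries the flow/realm id, i.e. the
 * tclassid source mentioned in the commit message above).
 */
static const struct nla_policy example_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
	[FRA_FLOW]	= { .type = NLA_U32 },
};
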
static inline void fib_rule_get(struct fib_rule *rule)
{
	atomic_inc(&rule->refcnt);
}

static inline void fib_rule_put_rcu(struct rcu_head *head)
{
	struct fib_rule *rule = container_of(head, struct fib_rule, rcu);
	release_net(rule->fr_net);
	kfree(rule);
}

static inline void fib_rule_put(struct fib_rule *rule)
{
	if (atomic_dec_and_test(&rule->refcnt))
		call_rcu(&rule->rcu, fib_rule_put_rcu);
}

static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
{
	if (nla[FRA_TABLE])
		return nla_get_u32(nla[FRA_TABLE]);
	return frh->table;
}

extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *);
extern void fib_rules_unregister(struct fib_rules_ops *);

extern int	fib_rules_lookup(struct fib_rules_ops *,
				 struct flowi *, int flags,
				 struct fib_lookup_arg *);
extern int	fib_default_rule_add(struct fib_rules_ops *,
				     u32 pref, u32 table,
				     u32 flags);
extern u32	fib_default_rule_pref(struct fib_rules_ops *ops);
#endif
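For context, a hedged sketch of how a protocol family typically wires these entry points together at init time: example_rules_init and example_rules_ops_template are hypothetical names, and the flow (register the ops for a netns, then install local/main/default rules) is modeled loosely on the IPv4 setup.

static int example_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	/* example_rules_ops_template is assumed to be a filled-in
	 * struct fib_rules_ops (family, callbacks, policy, owner, ...).
	 */
	ops = fib_rules_register(&example_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
	if (err < 0)
		goto err_out;
	err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
	if (err < 0)
		goto err_out;
	err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err_out;
	return 0;

err_out:
	fib_rules_unregister(ops);
	return err;
}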