netfilter: nf_tables_offload: support indr block call

Add support for the indirect block call to nftables. This allows nftables
to offload rules to vlan and tunnel devices, which do not implement
ndo_setup_tc themselves: the block offload request is relayed to the
underlying hardware driver through its registered indirect block callback.

nft add table netdev firewall
nft add chain netdev firewall aclout { type filter hook ingress offload device mlx_pf0vf0 priority - 300 \; }
nft add rule netdev firewall aclout ip daddr 10.0.0.1 fwd to vlan0
nft add chain netdev firewall aclin { type filter hook ingress device vlan0 priority - 300 \; }
nft add rule netdev firewall aclin ip daddr 10.0.0.7 fwd to mlx_pf0vf0
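
In the example above, the aclout chain hooks into mlx_pf0vf0, which
implements ndo_setup_tc, so its block is offloaded directly. The aclin
chain hooks into vlan0, which has no ndo_setup_tc; its block is delivered
to the underlying driver through the indirect block call.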

Signed-off-by: wenxu <wenxu@ucloud.cn>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9a32669fec, parent 1150ab0f1b
wenxu <wenxu@ucloud.cn>, 2019-08-07 09:13:54 +08:00, committed by David S. Miller
3 changed files with 135 additions and 24 deletions


@@ -63,6 +63,10 @@ struct nft_rule;
struct nft_flow_rule *nft_flow_rule_create(const struct nft_rule *rule);
void nft_flow_rule_destroy(struct nft_flow_rule *flow);
int nft_flow_rule_offload_commit(struct net *net);
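/* Entry point for the flow indirect block infrastructure: issues the block
 * bind/unbind command towards the netdev basechain bound to @dev, using the
 * driver-supplied callback.
 */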
void nft_indr_block_get_and_ing_cmd(struct net_device *dev,
flow_indr_block_bind_cb_t *cb,
void *cb_priv,
enum flow_block_command command);
#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
(__reg)->base_offset = \


@@ -7593,6 +7593,11 @@ static struct pernet_operations nf_tables_net_ops = {
.exit = nf_tables_exit_net,
};
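/* Registered with flow_indr_add_block_ing_cb() at module init so that
 * indirect block callbacks registered by drivers reach nftables basechains.
 */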
static struct flow_indr_block_ing_entry block_ing_entry = {
.cb = nft_indr_block_get_and_ing_cmd,
.list = LIST_HEAD_INIT(block_ing_entry.list),
};
static int __init nf_tables_module_init(void)
{
int err;
@@ -7624,6 +7629,7 @@ static int __init nf_tables_module_init(void)
goto err5;
nft_chain_route_init();
flow_indr_add_block_ing_cb(&block_ing_entry);
return err;
err5:
rhltable_destroy(&nft_objname_ht);
@@ -7640,6 +7646,7 @@ err1:
static void __exit nf_tables_module_exit(void)
{
flow_indr_del_block_ing_cb(&block_ing_entry);
nfnetlink_subsys_unregister(&nf_tables_subsys);
unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
nft_chain_filter_fini();


@@ -171,24 +171,110 @@ static int nft_flow_offload_unbind(struct flow_block_offload *bo,
return 0;
}
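/* Bind/unbind step shared by the direct (ndo_setup_tc) and indirect block
 * offload paths below.
 */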
static int nft_block_setup(struct nft_base_chain *basechain,
struct flow_block_offload *bo,
enum flow_block_command cmd)
{
int err;
switch (cmd) {
case FLOW_BLOCK_BIND:
err = nft_flow_offload_bind(bo, basechain);
break;
case FLOW_BLOCK_UNBIND:
err = nft_flow_offload_unbind(bo, basechain);
break;
default:
WARN_ON_ONCE(1);
err = -EOPNOTSUPP;
}
return err;
}
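/* Direct path: the hook device implements ndo_setup_tc, so the block
 * offload command is sent straight to its driver.
 */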
static int nft_block_offload_cmd(struct nft_base_chain *chain,
struct net_device *dev,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
struct flow_block_offload bo = {};
int err;
bo.net = dev_net(dev);
bo.block = &chain->flow_block;
bo.command = cmd;
bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo.extack = &extack;
INIT_LIST_HEAD(&bo.cb_list);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
if (err < 0)
return err;
return nft_block_setup(chain, &bo, cmd);
}
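/* Run one block command against a single driver-supplied indirect callback;
 * invoked via nft_indr_block_get_and_ing_cmd() when a driver registers or
 * unregisters an indirect block callback for the device.
 */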
static void nft_indr_block_ing_cmd(struct net_device *dev,
struct nft_base_chain *chain,
flow_indr_block_bind_cb_t *cb,
void *cb_priv,
enum flow_block_command cmd)
{
struct netlink_ext_ack extack = {};
struct flow_block_offload bo = {};
if (!chain)
return;
bo.net = dev_net(dev);
bo.block = &chain->flow_block;
bo.command = cmd;
bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo.extack = &extack;
INIT_LIST_HEAD(&bo.cb_list);
cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);
nft_block_setup(chain, &bo, cmd);
}
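/* Indirect path: the hook device (e.g. a vlan or tunnel device) has no
 * ndo_setup_tc, so flow_indr_block_call() relays the command to the
 * callbacks that underlying drivers registered for this device. Without
 * any registered callback the chain cannot be offloaded.
 */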
static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
struct net_device *dev,
enum flow_block_command cmd)
{
struct flow_block_offload bo = {};
struct netlink_ext_ack extack = {};
bo.net = dev_net(dev);
bo.block = &chain->flow_block;
bo.command = cmd;
bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo.extack = &extack;
INIT_LIST_HEAD(&bo.cb_list);
flow_indr_block_call(dev, &bo, cmd);
if (list_empty(&bo.cb_list))
return -EOPNOTSUPP;
return nft_block_setup(chain, &bo, cmd);
}
#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK
static int nft_flow_offload_chain(struct nft_trans *trans,
enum flow_block_command cmd)
{
struct nft_chain *chain = trans->ctx.chain;
- struct netlink_ext_ack extack = {};
- struct flow_block_offload bo = {};
struct nft_base_chain *basechain;
struct net_device *dev;
- int err;
if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
basechain = nft_base_chain(chain);
dev = basechain->ops.dev;
- if (!dev || !dev->netdev_ops->ndo_setup_tc)
+ if (!dev)
return -EOPNOTSUPP;
/* Only default policy to accept is supported for now. */
@@ -197,26 +283,10 @@ static int nft_flow_offload_chain(struct nft_trans *trans,
nft_trans_chain_policy(trans) != NF_ACCEPT)
return -EOPNOTSUPP;
- bo.command = cmd;
- bo.block = &basechain->flow_block;
- bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
- bo.extack = &extack;
- INIT_LIST_HEAD(&bo.cb_list);
- err = dev->netdev_ops->ndo_setup_tc(dev, FLOW_SETUP_BLOCK, &bo);
- if (err < 0)
- return err;
- switch (cmd) {
- case FLOW_BLOCK_BIND:
- err = nft_flow_offload_bind(&bo, basechain);
- break;
- case FLOW_BLOCK_UNBIND:
- err = nft_flow_offload_unbind(&bo, basechain);
- break;
- }
- return err;
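/* Devices that implement ndo_setup_tc keep the direct offload path;
 * anything else (e.g. vlan or tunnel devices) is offloaded through the
 * indirect block call.
 */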
+ if (dev->netdev_ops->ndo_setup_tc)
+ return nft_block_offload_cmd(basechain, dev, cmd);
+ else
+ return nft_indr_block_offload_cmd(basechain, dev, cmd);
}
int nft_flow_rule_offload_commit(struct net *net)
@@ -266,3 +336,33 @@ int nft_flow_rule_offload_commit(struct net *net)
return err;
}
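/* Walk the netdev-family tables, find the basechain whose hook device
 * matches @dev, and run the block command through the driver-supplied
 * callback.
 */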
void nft_indr_block_get_and_ing_cmd(struct net_device *dev,
flow_indr_block_bind_cb_t *cb,
void *cb_priv,
enum flow_block_command command)
{
struct net *net = dev_net(dev);
const struct nft_table *table;
const struct nft_chain *chain;
list_for_each_entry_rcu(table, &net->nft.tables, list) {
if (table->family != NFPROTO_NETDEV)
continue;
list_for_each_entry_rcu(chain, &table->chains, list) {
if (nft_is_base_chain(chain)) {
struct nft_base_chain *basechain;
basechain = nft_base_chain(chain);
if (!strncmp(basechain->dev_name, dev->name,
IFNAMSIZ)) {
nft_indr_block_ing_cmd(dev, basechain,
cb, cb_priv,
command);
return;
}
}
}
}
}