
net/mlx5e: Support TC indirect block notifications for eswitch uplink reprs

Towards using this mechanism, instead of egdev, as the means to offload
tunnel decap rules set on SW tunnel devices, add the supporting
structures and functions.

Signed-off-by: Oz Shlomo <ozsh@mellanox.com>
Reviewed-by: Eli Britstein <elibr@mellanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Oz Shlomo authored on 2018-10-28 08:34:51 +02:00; committed by Saeed Mahameed
parent ec1366c207
commit f5bc2c5de1
4 changed files with 210 additions and 0 deletions
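
For orientation before the per-file diffs: the patch builds on the TC indirect block registration API that entered the kernel around the same time. Below is a rough sketch of that API surface as best recalled from include/net/pkt_cls.h of this era; the prototypes are shown for illustration only and may differ in detail from the exact tree this commit applies to.

/* Per-netdev indirect callback: the TC core invokes it when a clsact/ingress
 * block is bound to, or unbound from, a device the driver registered
 * interest in.
 */
typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
                                    enum tc_setup_type type, void *type_data);

/* cb_priv is the driver cookie handed back to cb on every invocation;
 * cb_ident identifies the registration so the matching unregister call
 * can find it.
 */
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
                                tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
                                   tc_indr_block_bind_cb_t *cb, void *cb_ident);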

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -49,6 +49,15 @@
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
struct mlx5e_rep_priv *rpriv;
struct list_head list;
};
static void mlx5e_rep_indr_unregister_block(struct net_device *netdev);
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
@@ -518,6 +527,166 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
neigh_release(n);
}
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev)
{
struct mlx5e_rep_indr_block_priv *cb_priv;
/* All callback list access should be protected by RTNL. */
ASSERT_RTNL();
list_for_each_entry(cb_priv,
&rpriv->uplink_priv.tc_indr_block_priv_list,
list)
if (cb_priv->netdev == netdev)
return cb_priv;
return NULL;
}
static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
list_for_each_entry_safe(cb_priv, temp, head, list) {
mlx5e_rep_indr_unregister_block(cb_priv->netdev);
kfree(cb_priv);
}
}
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
struct tc_cls_flower_offload *flower,
struct mlx5e_rep_indr_block_priv *indr_priv)
{
return -EOPNOTSUPP;
}
static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
void *type_data, void *indr_priv)
{
struct mlx5e_rep_indr_block_priv *priv = indr_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
default:
return -EOPNOTSUPP;
}
}
static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
struct mlx5e_rep_priv *rpriv,
struct tc_block_offload *f)
{
struct mlx5e_rep_indr_block_priv *indr_priv;
int err = 0;
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
switch (f->command) {
case TC_BLOCK_BIND:
indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
if (indr_priv)
return -EEXIST;
indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
if (!indr_priv)
return -ENOMEM;
indr_priv->netdev = netdev;
indr_priv->rpriv = rpriv;
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
err = tcf_block_cb_register(f->block,
mlx5e_rep_indr_setup_block_cb,
netdev, indr_priv, f->extack);
if (err) {
list_del(&indr_priv->list);
kfree(indr_priv);
}
return err;
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
mlx5e_rep_indr_setup_block_cb,
netdev);
indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
if (indr_priv) {
list_del(&indr_priv->list);
kfree(indr_priv);
}
return 0;
default:
return -EOPNOTSUPP;
}
return 0;
}
static
int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
enum tc_setup_type type, void *type_data)
{
switch (type) {
case TC_SETUP_BLOCK:
return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
type_data);
default:
return -EOPNOTSUPP;
}
}
static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev)
{
int err;
err = __tc_indr_block_cb_register(netdev, rpriv,
mlx5e_rep_indr_setup_tc_cb,
netdev);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
netdev_name(netdev), err);
}
return err;
}
static void mlx5e_rep_indr_unregister_block(struct net_device *netdev)
{
__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
netdev);
}
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
uplink_priv.netdevice_nb);
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
if (!mlx5e_tc_tun_device_to_offload(priv, netdev))
return NOTIFY_OK;
switch (event) {
case NETDEV_REGISTER:
mlx5e_rep_indr_register_block(rpriv, netdev);
break;
case NETDEV_UNREGISTER:
mlx5e_rep_indr_unregister_block(netdev);
break;
}
return NOTIFY_OK;
}
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
struct mlx5e_neigh *m_neigh);
@@ -1262,8 +1431,19 @@ mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
if (err)
goto err_neigh_cleanup;
/* init indirect block notifications */
INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
if (err) {
mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
goto err_indirect_block_cleanup;
}
return 0;
err_indirect_block_cleanup:
mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
err_remove_sqs:
@@ -1280,6 +1460,10 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
/* clean indirect TC block notifications */
unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
mlx5e_rep_indr_clean_block_privs(rpriv);
/* clean uplink offloaded TC rules, delete shared tc flow table */
mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
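
A condensed reading of the two callback layers wired up in the hunks above (a summary sketch only; the identifiers are the ones used in the diff, and the argument roles follow the assumed prototypes noted after the commit header):

/* Layer 1: per-netdev indirect callback, registered from the netdevice
 * notifier once a supported tunnel device appears:
 *
 *     __tc_indr_block_cb_register(netdev, rpriv,
 *                                 mlx5e_rep_indr_setup_tc_cb, netdev);
 *
 *     rpriv  -> handed back as cb_priv on each TC_SETUP_BLOCK call
 *     netdev -> cb_ident, matched later by __tc_indr_block_cb_unregister()
 *
 * Layer 2: per-block callback, registered when that device's ingress
 * block is bound (TC_BLOCK_BIND):
 *
 *     tcf_block_cb_register(f->block, mlx5e_rep_indr_setup_block_cb,
 *                           netdev, indr_priv, f->extack);
 *
 *     indr_priv -> handed back as cb_priv on each TC_SETUP_CLSFLOWER call
 *     netdev    -> cb_ident, matched later by tcf_block_cb_unregister()
 */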

drivers/net/ethernet/mellanox/mlx5/core/en_rep.h

@@ -58,6 +58,19 @@ struct mlx5_rep_uplink_priv {
* the uplink's VFs
*/
struct rhashtable tc_ht;
/* indirect block callbacks are invoked on bind/unbind events
 * on registered higher level devices (e.g. tunnel devices)
 *
 * tc_indr_block_priv_list is used to look up indirect callback
 * private data
 *
 * netdevice_nb is the netdev events notifier, used to register
 * tunnel devices for block events
 *
 */
struct list_head tc_indr_block_priv_list;
struct notifier_block netdevice_nb;
};
struct mlx5e_rep_priv {

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -2704,6 +2704,16 @@ out:
return err;
}
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev)
{
if (netif_is_vxlan(netdev) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
return true;
return false;
}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,

drivers/net/ethernet/mellanox/mlx5/core/en_tc.h

@@ -70,6 +70,9 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
int mlx5e_tc_num_filters(struct mlx5e_priv *priv);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}