Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-11-06 (ice)

This series contains updates to ice driver only.

Dave removes the SR-IOV LAG attribute only for the interface being disabled,
rather than for every interface in the aggregate, allowing all interfaces to
unwind properly.
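
A condensed sketch of the new disable path (simplified from the ice_lag.c
hunk below; surrounding locking and checks omitted):

	/* Clear SR-IOV LAG support only on the PF backing the interface
	 * being disabled; other aggregate members keep the feature bit
	 * and can unwind their own state.
	 */
	struct ice_netdev_priv *np = netdev_priv(lag->netdev);
	struct ice_pf *pf = np->vsi->back;

	ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);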

Michal Schmidt changes some LAG allocations from GFP_KERNEL to GFP_ATOMIC,
as they are made under the RCU read lock, where sleeping is not allowed.
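
A minimal self-contained sketch of the rule being enforced (illustrative type
and helper names, not the driver's code): inside an RCU read-side critical
section the kernel must not sleep, and GFP_KERNEL allocations may block on
memory reclaim, so GFP_ATOMIC is required:

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* stand-in for the driver's LAG netdev list entry */
	struct nd_entry {
		struct list_head node;
	};

	static void collect_entry(struct list_head *head)
	{
		struct nd_entry *e;

		rcu_read_lock();		/* no sleeping until unlock */
		e = kzalloc(sizeof(*e), GFP_ATOMIC); /* GFP_KERNEL could sleep here */
		if (e)
			list_add(&e->node, head);
		rcu_read_unlock();
	}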

Aniruddha and Marcin fix redirect and drop rules in switchdev mode by
properly determining and marking each rule's egress/ingress direction, as
summarized below.
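
Condensed from the ice_tc_lib.c hunks below, the mapping from device types to
rule direction and flags that the fixes establish (a summary, not driver code):

	/*
	 * filter device   redirect target   direction              rule flags
	 * -------------   ---------------   --------------------   ------------------------
	 * representor     representor       EGRESS  (VF to VF)     ICE_FLTR_TX + LB_ENABLE
	 * representor     uplink            EGRESS  (VF to uplink) ICE_FLTR_TX + LAN_ENABLE
	 * uplink          representor       INGRESS (uplink to VF) ICE_FLTR_RX + LB_ENABLE
	 */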

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: Fix VF-VF direction matching in drop rule in switchdev
  ice: Fix VF-VF filter rules in switchdev mode
  ice: lag: in RCU, use atomic allocation
  ice: Fix SRIOV LAG disable on non-compliant aggregate
====================

Link: https://lore.kernel.org/r/20231107004844.655549-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 9b818a340c by Jakub Kicinski, 2023-11-08 18:43:52 -08:00
2 changed files with 91 additions and 41 deletions

drivers/net/ethernet/intel/ice/ice_lag.c

@@ -628,7 +628,7 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
 	INIT_LIST_HEAD(&ndlist.node);
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-		nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+		nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
 		if (!nl)
 			break;
@@ -1555,18 +1555,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
  */
 static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
 {
-	struct ice_lag_netdev_list *entry;
 	struct ice_netdev_priv *np;
-	struct net_device *netdev;
 	struct ice_pf *pf;
 
-	list_for_each_entry(entry, lag->netdev_head, node) {
-		netdev = entry->netdev;
-		np = netdev_priv(netdev);
-		pf = np->vsi->back;
-
-		ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
-	}
+	np = netdev_priv(lag->netdev);
+	pf = np->vsi->back;
+	ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
 }
 
 /**
@@ -1698,7 +1692,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
-		nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
+		nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
 		if (!nd_list)
 			break;
@@ -2075,7 +2069,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
 	INIT_LIST_HEAD(&ndlist.node);
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-		nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+		nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
 		if (!nl)
 			break;

drivers/net/ethernet/intel/ice/ice_tc_lib.c

@@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev)
 	return ice_tc_tun_get_type(dev) != TNL_LAST;
 }
 
-static int
-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
-			    struct flow_action_entry *act)
+static bool ice_tc_is_dev_uplink(struct net_device *dev)
+{
+	return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
+}
+
+static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
+					struct ice_tc_flower_fltr *fltr,
+					struct net_device *target_dev)
+{
+	struct ice_repr *repr;
+
+	fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+	if (ice_is_port_repr_netdev(filter_dev) &&
+	    ice_is_port_repr_netdev(target_dev)) {
+		repr = ice_netdev_to_repr(target_dev);
+
+		fltr->dest_vsi = repr->src_vsi;
+		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+	} else if (ice_is_port_repr_netdev(filter_dev) &&
+		   ice_tc_is_dev_uplink(target_dev)) {
+		repr = ice_netdev_to_repr(filter_dev);
+
+		fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+	} else if (ice_tc_is_dev_uplink(filter_dev) &&
+		   ice_is_port_repr_netdev(target_dev)) {
+		repr = ice_netdev_to_repr(target_dev);
+
+		fltr->dest_vsi = repr->src_vsi;
+		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+	} else {
+		NL_SET_ERR_MSG_MOD(fltr->extack,
+				   "Unsupported netdevice in switchdev mode");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ice_tc_setup_drop_action(struct net_device *filter_dev,
+			 struct ice_tc_flower_fltr *fltr)
+{
+	fltr->action.fltr_act = ICE_DROP_PACKET;
+
+	if (ice_is_port_repr_netdev(filter_dev)) {
+		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+	} else if (ice_tc_is_dev_uplink(filter_dev)) {
+		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+	} else {
+		NL_SET_ERR_MSG_MOD(fltr->extack,
+				   "Unsupported netdevice in switchdev mode");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
+				       struct ice_tc_flower_fltr *fltr,
+				       struct flow_action_entry *act)
 {
-	struct ice_repr *repr;
+	int err;
 
 	switch (act->id) {
 	case FLOW_ACTION_DROP:
-		fltr->action.fltr_act = ICE_DROP_PACKET;
+		err = ice_tc_setup_drop_action(filter_dev, fltr);
+		if (err)
+			return err;
+
 		break;
 
 	case FLOW_ACTION_REDIRECT:
-		fltr->action.fltr_act = ICE_FWD_TO_VSI;
-
-		if (ice_is_port_repr_netdev(act->dev)) {
-			repr = ice_netdev_to_repr(act->dev);
-
-			fltr->dest_vsi = repr->src_vsi;
-			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-		} else if (netif_is_ice(act->dev) ||
-			   ice_is_tunnel_supported(act->dev)) {
-			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-		} else {
-			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
-			return -EINVAL;
-		}
-
+		err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+		if (err)
+			return err;
+
 		break;
@@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 		goto exit;
 	}
 
-	/* egress traffic is always redirect to uplink */
-	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
-		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
-
 	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
 	if (fltr->action.fltr_act != ICE_DROP_PACKET)
 		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
@@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 	rule_info.flags_info.act_valid = true;
 
 	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+		/* Uplink to VF */
 		rule_info.sw_act.flag |= ICE_FLTR_RX;
 		rule_info.sw_act.src = hw->pf_id;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
-	} else {
+	} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+		   fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+		/* VF to Uplink */
 		rule_info.sw_act.flag |= ICE_FLTR_TX;
 		rule_info.sw_act.src = vsi->idx;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+	} else {
+		/* VF to VF */
+		rule_info.sw_act.flag |= ICE_FLTR_TX;
+		rule_info.sw_act.src = vsi->idx;
+		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
 	}
 
 	/* specify the cookie as filter_rule_id */
@@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
 /**
  * ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @filter_dev: Pointer to device on which filter is being added
  * @vsi: Pointer to VSI
  * @cls_flower: Pointer to TC flower offload structure
  * @fltr: Pointer to TC flower filter structure
  *
  * Parse the actions for a TC filter
  */
-static int
-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
-			    struct flow_cls_offload *cls_flower,
-			    struct ice_tc_flower_fltr *fltr)
+static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
+				       struct ice_vsi *vsi,
+				       struct flow_cls_offload *cls_flower,
+				       struct ice_tc_flower_fltr *fltr)
 {
 	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
 	struct flow_action *flow_action = &rule->action;
@@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
 	flow_action_for_each(i, act, flow_action) {
 		if (ice_is_eswitch_mode_switchdev(vsi->back))
-			err = ice_eswitch_tc_parse_action(fltr, act);
+			err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
 		else
 			err = ice_tc_parse_action(vsi, fltr, act);
 		if (err)
@@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
 	if (err < 0)
 		goto err;
 
-	err = ice_parse_tc_flower_actions(vsi, f, fltr);
+	err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
 	if (err < 0)
 		goto err;