mlxsw: spectrum: Set LAG port collector only when active

The LAG port collecting (receive) function was mistakenly set when the
port was registered as a LAG member, while it should be set only when
the port collection state is set to true. Set LAG port to collecting
when it is set to distributing, as described in the IEEE link
aggregation standard coupled control mux machine state diagram.

Signed-off-by: Nir Dotan <nird@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 48ebab31d4
parent 3f9b2d2864
Author: Nir Dotan
Date: 2019-02-12 16:29:51 +00:00
Committer: David S. Miller

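For context before the diff: the coupled control mux machine cited in the commit message ties the collecting (receive) and distributing (transmit) functions together, so a member port should never be left collecting while it is not distributing. The enum below is only an illustrative sketch of those standard states; neither the enum nor its names are part of the driver, which instead expresses the same coupling through the new mlxsw_sp_port_lag_col_dist_enable()/mlxsw_sp_port_lag_col_dist_disable() helpers.

/* Illustrative sketch only, not driver code: states of the IEEE 802.1AX
 * coupled control mux machine. With coupled control there is no standalone
 * COLLECTING state, so the collector is enabled only together with the
 * distributor, which is the ordering this patch enforces.
 */
enum lacp_coupled_mux_state {
	LACP_MUX_DETACHED,		/* not attached to an aggregator */
	LACP_MUX_WAITING,		/* wait_while_timer running */
	LACP_MUX_ATTACHED,		/* attached, collector/distributor off */
	LACP_MUX_COLLECTING_DISTRIBUTING, /* rx (collect) and tx (distribute) on */
};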

@@ -4771,9 +4771,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
 	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
 	if (err)
 		goto err_col_port_add;
-	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
-	if (err)
-		goto err_col_port_enable;
 
 	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
 				   mlxsw_sp_port->local_port);
@@ -4787,8 +4784,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	return 0;
 
-err_col_port_enable:
-	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
 err_col_port_add:
 	if (!lag->ref_count)
 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -4807,7 +4802,6 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
 	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
 	WARN_ON(lag->ref_count == 0);
 
-	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
 	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
 
 	/* Any VLANs configured on the port are no longer valid */
@@ -4852,21 +4846,56 @@ static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
 }
 
-static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
-				       bool lag_tx_enabled)
+static int
+mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-	if (lag_tx_enabled)
-		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
-						  mlxsw_sp_port->lag_id);
-	else
-		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
-						     mlxsw_sp_port->lag_id);
+	int err;
+
+	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
+					   mlxsw_sp_port->lag_id);
+	if (err)
+		return err;
+
+	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
+	if (err)
+		goto err_dist_port_add;
+
+	return 0;
+
+err_dist_port_add:
+	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
+	return err;
 }
 
+static int
+mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int err;
+
+	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
+					    mlxsw_sp_port->lag_id);
+	if (err)
+		return err;
+
+	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
+					    mlxsw_sp_port->lag_id);
+	if (err)
+		goto err_col_port_disable;
+
+	return 0;
+
+err_col_port_disable:
+	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
+	return err;
+}
+
 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
 				     struct netdev_lag_lower_state_info *info)
 {
-	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
+	if (info->tx_enabled)
+		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
+	else
+		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
 }
 
 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -5089,8 +5118,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
 							     upper_dev);
 			} else {
-				mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
-							    false);
+				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
 				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
 							upper_dev);
 			}