net: dsa: create a dsa_lag structure

The main purpose of this change is to create a data structure for a LAG
as seen by DSA. This is similar to what we have for bridging - a copy of
this structure is passed by value to ->port_lag_join and ->port_lag_leave.
For now it holds the lag_dev, the id and a reference count. Future
patches will add a list of FDB entries for the LAG (these also need to
be refcounted to work properly).
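
For reference, the new structure (added to the DSA header further down in
this diff) currently holds exactly these fields:

    struct dsa_lag {
        struct net_device *dev;
        unsigned int id;
        refcount_t refcount;
    };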

The LAG structure is created using dsa_port_lag_create() and destroyed
using dsa_port_lag_destroy(), just like we have for bridging.
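
A condensed sketch of the refcounting that these two helpers implement
(the full dsa_port_lag_create()/dsa_port_lag_destroy() bodies appear
further down in this diff):

    /* First port: allocate, refcount = 1, dsa_lag_map() assigns the ID */
    lag = kzalloc(sizeof(*lag), GFP_KERNEL);
    refcount_set(&lag->refcount, 1);
    lag->dev = lag_dev;
    dsa_lag_map(ds->dst, lag);

    /* Subsequent ports: reuse the dsa_lag found by dsa_tree_lag_find() */
    lag = dsa_tree_lag_find(ds->dst, lag_dev);
    refcount_inc(&lag->refcount);

    /* Last port leaving: unmap and free */
    if (refcount_dec_and_test(&lag->refcount)) {
        dsa_lag_unmap(ds->dst, lag);
        kfree(lag);
    }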

Because the dsa_lag itself is now refcounted, we can simplify
dsa_lag_map() and dsa_lag_unmap(). These functions need to keep a LAG in
the dst->lags array only as long as at least one port uses it. The
refcounting logic inside them can now be removed - they are only called
when the map or unmap operation actually needs to be performed.
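
With no per-port walk left, dsa_lag_unmap() reduces to clearing the slot
that dsa_lag_map() claimed (as in the hunk further down in this diff):

    void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
    {
        unsigned int id;

        dsa_lags_foreach_id(id, dst) {
            if (dsa_lag_by_id(dst, id) == lag) {
                dst->lags[id - 1] = NULL;
                lag->id = 0;
                break;
            }
        }
    }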

dsa_lag_dev() is renamed to dsa_lag_by_id() and now returns a pointer to
the dsa_lag structure instead of the lag_dev net_device.

dsa_lag_foreach_port() now takes the dsa_lag structure as argument.

dst->lags now holds an array of pointers to dsa_lag structures.
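
For example, a driver iterating over all offloaded LAGs and their member
ports now does (fragment; dp and id are declared by the caller, cf.
mv88e6xxx_lag_sync_masks() below):

    dsa_lags_foreach_id(id, ds->dst) {
        struct dsa_lag *lag = dsa_lag_by_id(ds->dst, id);

        if (!lag)
            continue;

        dsa_lag_foreach_port(dp, ds->dst, lag) {
            /* dp is a member port of this LAG */
        }
    }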

dsa_lag_map() now also saves the dsa_lag->id value, so that linear
walking of dst->lags in drivers using dsa_lag_id() is no longer
necessary. They can just look at lag.id.
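
In driver terms, the old linear lookup

    id = dsa_lag_id(ds->dst, lag_dev);

becomes a plain field access on the structure passed by value, e.g. in the
mv88e6xxx and qca8k hunks below:

    /* DSA LAG IDs are one-based, hardware is zero-based */
    id = lag.id - 1;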

dsa_port_lag_id_get() is a helper, similar to dsa_port_bridge_num_get(),
which can be used by drivers to get the LAG ID assigned by DSA to a
given port.
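
The helper is a trivial inline on top of dp->lag (see the header hunk
further down), usable from code paths that only have the dsa_port at hand:

    static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
    {
        return dp->lag ? dp->lag->id : 0;
    }

    /* e.g. in mv88e6xxx_pvt_map(): */
    port = dsa_port_lag_id_get(dp) - 1;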

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Vladimir Oltean, 2022-02-23 16:00:49 +02:00 (committed by Jakub Kicinski)
Commit: dedd6a009f (parent: b99dbdf00b)
10 changed files with 173 additions and 109 deletions

@ -1625,7 +1625,7 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
ds = dsa_switch_find(dst->index, dev);
dp = ds ? dsa_to_port(ds, port) : NULL;
if (dp && dp->lag_dev) {
if (dp && dp->lag) {
/* As the PVT is used to limit flooding of
* FORWARD frames, which use the LAG ID as the
* source port, we must translate dev/port to
@ -1634,7 +1634,7 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
* (zero-based).
*/
dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
port = dsa_lag_id(dst, dp->lag_dev) - 1;
port = dsa_port_lag_id_get(dp) - 1;
}
}
@ -1672,7 +1672,7 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (dsa_to_port(ds, port)->lag_dev)
if (dsa_to_port(ds, port)->lag)
/* Hardware is incapable of fast-aging a LAG through a
* regular ATU move operation. Until we have something
* more fancy in place this is a no-op.
@ -6176,21 +6176,20 @@ out:
}
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
struct net_device *lag_dev,
struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
int id, members = 0;
int members = 0;
if (!mv88e6xxx_has_lag(chip))
return false;
id = dsa_lag_id(ds->dst, lag_dev);
if (id <= 0 || id > ds->num_lag_ids)
if (!lag.id)
return false;
dsa_lag_foreach_port(dp, ds->dst, lag_dev)
dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
@ -6210,8 +6209,7 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
return true;
}
static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds,
struct net_device *lag_dev)
static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
@ -6219,13 +6217,13 @@ static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds,
int id;
/* DSA LAG IDs are one-based, hardware is zero-based */
id = dsa_lag_id(ds->dst, lag_dev) - 1;
id = lag.id - 1;
/* Build the map of all ports to distribute flows destined for
* this LAG. This can be either a local user port, or a DSA
* port if the LAG port is on a remote chip.
*/
dsa_lag_foreach_port(dp, ds->dst, lag_dev)
dsa_lag_foreach_port(dp, ds->dst, &lag)
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
@ -6269,9 +6267,9 @@ static void mv88e6xxx_lag_set_port_mask(u16 *mask, int port,
static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct net_device *lag_dev;
unsigned int id, num_tx;
struct dsa_port *dp;
struct dsa_lag *lag;
int i, err, nth;
u16 mask[8];
u16 ivec;
@ -6281,7 +6279,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
/* Disable all masks for ports that _are_ members of a LAG. */
dsa_switch_for_each_port(dp, ds) {
if (!dp->lag_dev)
if (!dp->lag)
continue;
ivec &= ~BIT(dp->index);
@ -6294,12 +6292,12 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
* are in the Tx set.
*/
dsa_lags_foreach_id(id, ds->dst) {
lag_dev = dsa_lag_dev(ds->dst, id);
if (!lag_dev)
lag = dsa_lag_by_id(ds->dst, id);
if (!lag)
continue;
num_tx = 0;
dsa_lag_foreach_port(dp, ds->dst, lag_dev) {
dsa_lag_foreach_port(dp, ds->dst, lag) {
if (dp->lag_tx_enabled)
num_tx++;
}
@ -6308,7 +6306,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
continue;
nth = 0;
dsa_lag_foreach_port(dp, ds->dst, lag_dev) {
dsa_lag_foreach_port(dp, ds->dst, lag) {
if (!dp->lag_tx_enabled)
continue;
@ -6330,14 +6328,14 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
}
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
struct net_device *lag_dev)
struct dsa_lag lag)
{
int err;
err = mv88e6xxx_lag_sync_masks(ds);
if (!err)
err = mv88e6xxx_lag_sync_map(ds, lag_dev);
err = mv88e6xxx_lag_sync_map(ds, lag);
return err;
}
@ -6354,17 +6352,17 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
}
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
struct net_device *lag_dev,
struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err, id;
if (!mv88e6xxx_lag_can_offload(ds, lag_dev, info))
if (!mv88e6xxx_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
/* DSA LAG IDs are one-based */
id = dsa_lag_id(ds->dst, lag_dev) - 1;
id = lag.id - 1;
mv88e6xxx_reg_lock(chip);
@ -6372,7 +6370,7 @@ static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
if (err)
goto err_unlock;
err = mv88e6xxx_lag_sync_masks_map(ds, lag_dev);
err = mv88e6xxx_lag_sync_masks_map(ds, lag);
if (err)
goto err_clear_trunk;
@ -6387,13 +6385,13 @@ err_unlock:
}
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
struct net_device *lag_dev)
struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_trunk;
mv88e6xxx_reg_lock(chip);
err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag_dev);
err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
err_trunk = mv88e6xxx_port_set_trunk(chip, port, false, 0);
mv88e6xxx_reg_unlock(chip);
return err_sync ? : err_trunk;
@ -6412,18 +6410,18 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
}
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
int port, struct net_device *lag_dev,
int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (!mv88e6xxx_lag_can_offload(ds, lag_dev, info))
if (!mv88e6xxx_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_lag_sync_masks_map(ds, lag_dev);
err = mv88e6xxx_lag_sync_masks_map(ds, lag);
if (err)
goto unlock;
@ -6435,13 +6433,13 @@ unlock:
}
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
int port, struct net_device *lag_dev)
int port, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_pvt;
mv88e6xxx_reg_lock(chip);
err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag_dev);
err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
err_pvt = mv88e6xxx_pvt_map(chip, sw_index, port);
mv88e6xxx_reg_unlock(chip);
return err_sync ? : err_pvt;

@ -677,20 +677,20 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
}
static int felix_lag_join(struct dsa_switch *ds, int port,
struct net_device *bond,
struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_lag_join(ocelot, port, bond, info);
return ocelot_port_lag_join(ocelot, port, lag.dev, info);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
struct net_device *bond)
struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_lag_leave(ocelot, port, bond);
ocelot_port_lag_leave(ocelot, port, lag.dev);
return 0;
}

@ -2646,18 +2646,16 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
}
static bool
qca8k_lag_can_offload(struct dsa_switch *ds,
struct net_device *lag_dev,
qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct dsa_port *dp;
int id, members = 0;
int members = 0;
id = dsa_lag_id(ds->dst, lag_dev);
if (id <= 0 || id > ds->num_lag_ids)
if (!lag.id)
return false;
dsa_lag_foreach_port(dp, ds->dst, lag_dev)
dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
@ -2675,16 +2673,14 @@ qca8k_lag_can_offload(struct dsa_switch *ds,
}
static int
qca8k_lag_setup_hash(struct dsa_switch *ds,
struct net_device *lag_dev,
qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct net_device *lag_dev = lag.dev;
struct qca8k_priv *priv = ds->priv;
bool unique_lag = true;
unsigned int i;
u32 hash = 0;
int i, id;
id = dsa_lag_id(ds->dst, lag_dev);
switch (info->hash_type) {
case NETDEV_LAG_HASH_L23:
@ -2701,7 +2697,7 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
/* Check if we are the unique configured LAG */
dsa_lags_foreach_id(i, ds->dst)
if (i != id && dsa_lag_dev(ds->dst, i)) {
if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
unique_lag = false;
break;
}
@ -2726,14 +2722,14 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
static int
qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
struct net_device *lag_dev, bool delete)
struct dsa_lag lag, bool delete)
{
struct qca8k_priv *priv = ds->priv;
int ret, id, i;
u32 val;
/* DSA LAG IDs are one-based, hardware is zero-based */
id = dsa_lag_id(ds->dst, lag_dev) - 1;
id = lag.id - 1;
/* Read current port member */
ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
@ -2795,27 +2791,26 @@ qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
}
static int
qca8k_port_lag_join(struct dsa_switch *ds, int port,
struct net_device *lag_dev,
qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
int ret;
if (!qca8k_lag_can_offload(ds, lag_dev, info))
if (!qca8k_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
ret = qca8k_lag_setup_hash(ds, lag_dev, info);
ret = qca8k_lag_setup_hash(ds, lag, info);
if (ret)
return ret;
return qca8k_lag_refresh_portmap(ds, port, lag_dev, false);
return qca8k_lag_refresh_portmap(ds, port, lag, false);
}
static int
qca8k_port_lag_leave(struct dsa_switch *ds, int port,
struct net_device *lag_dev)
struct dsa_lag lag)
{
return qca8k_lag_refresh_portmap(ds, port, lag_dev, true);
return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
static void

@ -116,6 +116,12 @@ struct dsa_netdevice_ops {
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \
MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
struct dsa_lag {
struct net_device *dev;
unsigned int id;
refcount_t refcount;
};
struct dsa_switch_tree {
struct list_head list;
@ -134,7 +140,7 @@ struct dsa_switch_tree {
/* Maps offloaded LAG netdevs to a zero-based linear ID for
* drivers that need it.
*/
struct net_device **lags;
struct dsa_lag **lags;
/* Tagging protocol operations */
const struct dsa_device_ops *tag_ops;
@ -170,14 +176,14 @@ struct dsa_switch_tree {
#define dsa_lag_foreach_port(_dp, _dst, _lag) \
list_for_each_entry((_dp), &(_dst)->ports, list) \
if ((_dp)->lag_dev == (_lag))
if (dsa_port_offloads_lag((_dp), (_lag)))
#define dsa_hsr_foreach_port(_dp, _ds, _hsr) \
list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
unsigned int id)
static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
unsigned int id)
{
/* DSA LAG IDs are one-based, dst->lags is zero-based */
return dst->lags[id - 1];
@ -189,8 +195,10 @@ static inline int dsa_lag_id(struct dsa_switch_tree *dst,
unsigned int id;
dsa_lags_foreach_id(id, dst) {
if (dsa_lag_dev(dst, id) == lag_dev)
return id;
struct dsa_lag *lag = dsa_lag_by_id(dst, id);
if (lag->dev == lag_dev)
return lag->id;
}
return -ENODEV;
@ -293,7 +301,7 @@ struct dsa_port {
struct devlink_port devlink_port;
struct phylink *pl;
struct phylink_config pl_config;
struct net_device *lag_dev;
struct dsa_lag *lag;
struct net_device *hsr_dev;
struct list_head list;
@ -643,14 +651,30 @@ static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
return dp->vlan_filtering;
}
static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
{
return dp->lag ? dp->lag->id : 0;
}
static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
{
return dp->lag ? dp->lag->dev : NULL;
}
static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
const struct dsa_lag *lag)
{
return dsa_port_lag_dev_get(dp) == lag->dev;
}
static inline
struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
{
if (!dp->bridge)
return NULL;
if (dp->lag_dev)
return dp->lag_dev;
if (dp->lag)
return dp->lag->dev;
else if (dp->hsr_dev)
return dp->hsr_dev;
@ -968,10 +992,10 @@ struct dsa_switch_ops {
int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
int port);
int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
int port, struct net_device *lag_dev,
int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info);
int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
int port, struct net_device *lag_dev);
int port, struct dsa_lag lag);
/*
* PTP functionality
@ -1043,10 +1067,10 @@ struct dsa_switch_ops {
*/
int (*port_lag_change)(struct dsa_switch *ds, int port);
int (*port_lag_join)(struct dsa_switch *ds, int port,
struct net_device *lag_dev,
struct dsa_lag lag,
struct netdev_lag_upper_info *info);
int (*port_lag_leave)(struct dsa_switch *ds, int port,
struct net_device *lag_dev);
struct dsa_lag lag);
/*
* HSR integration

@ -72,27 +72,24 @@ int dsa_broadcast(unsigned long e, void *v)
}
/**
* dsa_lag_map() - Map LAG netdev to a linear LAG ID
* dsa_lag_map() - Map LAG structure to a linear LAG array
* @dst: Tree in which to record the mapping.
* @lag_dev: Netdev that is to be mapped to an ID.
* @lag: LAG structure that is to be mapped to the tree's array.
*
* dsa_lag_id/dsa_lag_dev can then be used to translate between the
* dsa_lag_id/dsa_lag_by_id can then be used to translate between the
* two spaces. The size of the mapping space is determined by the
* driver by setting ds->num_lag_ids. It is perfectly legal to leave
* it unset if it is not needed, in which case these functions become
* no-ops.
*/
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag_dev)
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
unsigned int id;
if (dsa_lag_id(dst, lag_dev) > 0)
/* Already mapped */
return;
for (id = 1; id <= dst->lags_len; id++) {
if (!dsa_lag_dev(dst, id)) {
dst->lags[id - 1] = lag_dev;
if (!dsa_lag_by_id(dst, id)) {
dst->lags[id - 1] = lag;
lag->id = id;
return;
}
}
@ -108,28 +105,36 @@ void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag_dev)
/**
* dsa_lag_unmap() - Remove a LAG ID mapping
* @dst: Tree in which the mapping is recorded.
* @lag_dev: Netdev that was mapped.
* @lag: LAG structure that was mapped.
*
* As there may be multiple users of the mapping, it is only removed
* if there are no other references to it.
*/
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag_dev)
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
struct dsa_port *dp;
unsigned int id;
dsa_lag_foreach_port(dp, dst, lag_dev)
/* There are remaining users of this mapping */
return;
dsa_lags_foreach_id(id, dst) {
if (dsa_lag_dev(dst, id) == lag_dev) {
if (dsa_lag_by_id(dst, id) == lag) {
dst->lags[id - 1] = NULL;
lag->id = 0;
break;
}
}
}
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
const struct net_device *lag_dev)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_lag_dev_get(dp) == lag_dev)
return dp->lag;
return NULL;
}
struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
const struct net_device *br)
{

@ -76,7 +76,7 @@ struct dsa_notifier_mdb_info {
/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
struct net_device *lag_dev;
struct dsa_lag lag;
int sw_index;
int port;
@ -487,8 +487,10 @@ int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag_dev);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag_dev);
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag);
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
const struct net_device *lag_dev);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,

@ -429,7 +429,7 @@ int dsa_port_lag_change(struct dsa_port *dp,
};
bool tx_enabled;
if (!dp->lag_dev)
if (!dp->lag)
return 0;
/* On statically configured aggregates (e.g. loadbalance
@ -447,6 +447,45 @@ int dsa_port_lag_change(struct dsa_port *dp,
return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
static int dsa_port_lag_create(struct dsa_port *dp,
struct net_device *lag_dev)
{
struct dsa_switch *ds = dp->ds;
struct dsa_lag *lag;
lag = dsa_tree_lag_find(ds->dst, lag_dev);
if (lag) {
refcount_inc(&lag->refcount);
dp->lag = lag;
return 0;
}
lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!lag)
return -ENOMEM;
refcount_set(&lag->refcount, 1);
lag->dev = lag_dev;
dsa_lag_map(ds->dst, lag);
dp->lag = lag;
return 0;
}
static void dsa_port_lag_destroy(struct dsa_port *dp)
{
struct dsa_lag *lag = dp->lag;
dp->lag = NULL;
dp->lag_tx_enabled = false;
if (!refcount_dec_and_test(&lag->refcount))
return;
dsa_lag_unmap(dp->ds->dst, lag);
kfree(lag);
}
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
struct netdev_lag_upper_info *uinfo,
struct netlink_ext_ack *extack)
@ -454,15 +493,16 @@ int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
struct dsa_notifier_lag_info info = {
.sw_index = dp->ds->index,
.port = dp->index,
.lag_dev = lag_dev,
.info = uinfo,
};
struct net_device *bridge_dev;
int err;
dsa_lag_map(dp->ds->dst, lag_dev);
dp->lag_dev = lag_dev;
err = dsa_port_lag_create(dp, lag_dev);
if (err)
goto err_lag_create;
info.lag = *dp->lag;
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
if (err)
goto err_lag_join;
@ -480,8 +520,8 @@ int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
err_bridge_join:
dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
dp->lag_dev = NULL;
dsa_lag_unmap(dp->ds->dst, lag_dev);
dsa_port_lag_destroy(dp);
err_lag_create:
return err;
}
@ -499,11 +539,10 @@ void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
struct dsa_notifier_lag_info info = {
.sw_index = dp->ds->index,
.port = dp->index,
.lag_dev = lag_dev,
};
int err;
if (!dp->lag_dev)
if (!dp->lag)
return;
/* Port might have been part of a LAG that in turn was
@ -512,16 +551,15 @@ void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
if (br)
dsa_port_bridge_leave(dp, br);
dp->lag_tx_enabled = false;
dp->lag_dev = NULL;
info.lag = *dp->lag;
dsa_port_lag_destroy(dp);
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
if (err)
dev_err(dp->ds->dev,
"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
dp->index, ERR_PTR(err));
dsa_lag_unmap(dp->ds->dst, lag_dev);
}
/* Must be called under rcu_read_lock() */

@ -2134,7 +2134,7 @@ dsa_slave_lag_changeupper(struct net_device *dev,
continue;
dp = dsa_slave_to_port(lower);
if (!dp->lag_dev)
if (!dp->lag)
/* Software LAG */
continue;
@ -2163,7 +2163,7 @@ dsa_slave_lag_prechangeupper(struct net_device *dev,
continue;
dp = dsa_slave_to_port(lower);
if (!dp->lag_dev)
if (!dp->lag)
/* Software LAG */
continue;

@ -468,12 +468,12 @@ static int dsa_switch_lag_join(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
if (ds->index == info->sw_index && ds->ops->port_lag_join)
return ds->ops->port_lag_join(ds, info->port, info->lag_dev,
return ds->ops->port_lag_join(ds, info->port, info->lag,
info->info);
if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
return ds->ops->crosschip_lag_join(ds, info->sw_index,
info->port, info->lag_dev,
info->port, info->lag,
info->info);
return -EOPNOTSUPP;
@ -483,11 +483,11 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
if (ds->index == info->sw_index && ds->ops->port_lag_leave)
return ds->ops->port_lag_leave(ds, info->port, info->lag_dev);
return ds->ops->port_lag_leave(ds, info->port, info->lag);
if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
return ds->ops->crosschip_lag_leave(ds, info->sw_index,
info->port, info->lag_dev);
info->port, info->lag);
return -EOPNOTSUPP;
}

@ -246,12 +246,14 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
if (trunk) {
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_lag *lag;
/* The exact source port is not available in the tag,
* so we inject the frame directly on the upper
* team/bond.
*/
skb->dev = dsa_lag_dev(cpu_dp->dst, source_port + 1);
lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
skb->dev = lag ? lag->dev : NULL;
} else {
skb->dev = dsa_master_find_slave(dev, source_device,
source_port);