// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR	8
#define DPSW_MIN_VER_MINOR	9

#define DEFAULT_VLAN_ID		1

static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
{
	return port_priv->fdb->fdb_id;
}
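
/* Walk the FDB table array and return the first table that no switch port
 * has claimed, or NULL when all of them are in use.
 */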
static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->fdbs[i].in_use)
			return &ethsw->fdbs[i];
	return NULL;
}
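
/* Pick the FDB table a port should use: a private, unused one when the port
 * leaves a bridge (bridge_dev is NULL), or the table already used by the
 * other ports under the same upper bridge device.
 */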
static int dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
				     struct net_device *bridge_dev)
{
	struct ethsw_port_priv *other_port_priv = NULL;
	struct dpaa2_switch_fdb *fdb;
	struct net_device *other_dev;
	struct list_head *iter;

	/* If we leave a bridge (bridge_dev is NULL), find an unused
	 * FDB and use that.
	 */
	if (!bridge_dev) {
		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);

		/* If there is no unused FDB, we must be the last port that
		 * leaves the last bridge, all the others are standalone. We
		 * can just keep the FDB that we already have.
		 */

		if (!fdb) {
			port_priv->fdb->bridge_dev = NULL;
			return 0;
		}

		port_priv->fdb = fdb;
		port_priv->fdb->in_use = true;
		port_priv->fdb->bridge_dev = NULL;
		return 0;
	}

	/* The below call to netdev_for_each_lower_dev() requires the RTNL
	 * lock to be held. Assert on it so that it's easier to catch new
	 * code paths that reach this point without the RTNL lock.
	 */
	ASSERT_RTNL();

	/* If part of a bridge, use the FDB of the first DPAA2 switch
	 * interface present in that bridge
	 */
	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		if (other_dev == port_priv->netdev)
			continue;

		other_port_priv = netdev_priv(other_dev);
		break;
	}

	/* The current port is about to change its FDB to the one used by the
	 * first port that joined the bridge.
	 */
	if (other_port_priv) {
		/* The previous FDB is about to become unused, since the
		 * interface is no longer standalone.
		 */
		port_priv->fdb->in_use = false;
		port_priv->fdb->bridge_dev = NULL;

		/* Get a reference to the new FDB */
		port_priv->fdb = other_port_priv->fdb;
	}

	/* Keep track of the new upper bridge device */
	port_priv->fdb->bridge_dev = bridge_dev;

	return 0;
}
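
/* Build the egress flood configuration for one FDB table: every switch port
 * that shares the given FDB id and has the matching flood type enabled, plus
 * the control interface.
 */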
static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
					   enum dpsw_flood_type type,
					   struct dpsw_egress_flood_cfg *cfg)
{
	int i = 0, j;

	memset(cfg, 0, sizeof(*cfg));

	/* Add all the DPAA2 switch ports found in the same bridging domain to
	 * the egress flooding domain
	 */
	for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
		if (!ethsw->ports[j])
			continue;
		if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
			continue;

		if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
		else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
	}

	/* Add the CTRL interface to the egress flooding domain */
	cfg->if_id[i++] = ethsw->sw_attr.num_ifs;

	cfg->fdb_id = fdb_id;
	cfg->flood_type = type;
	cfg->num_ifs = i;
}
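
/* Program both the broadcast and the unknown-unicast flooding domains of an
 * FDB table into the MC firmware.
 */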
static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
{
	struct dpsw_egress_flood_cfg flood_cfg;
	int err;

	/* Setup broadcast flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	/* Setup unknown flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	return 0;
}
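
/* Translate an IOVA seen by the hardware into a CPU virtual address. Without
 * an IOMMU domain, the IOVA already is the physical address.
 */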
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}
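
/* Create a VLAN on the switch and associate it with the FDB table of the
 * given port, so packets classified to this VLAN are looked up in that FDB.
 */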
static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_vlan_cfg vcfg = {0};
	int err;

	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_vlan_add(ethsw->mc_io, 0,
			    ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;

	return 0;
}
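
/* Query the MC firmware for the current link state of a switch port. On
 * error, the port is reported as up.
 */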
static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
{
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return true;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	return state.up ? true : false;
}
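
/* Change the port PVID by rewriting its TCI configuration. The interface
 * must be down while the PVID changes, so disable it first and re-enable it
 * afterwards if it was up.
 */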
static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_tci_cfg tci_cfg = { 0 };
	bool up;
	int err, ret;

	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
		return err;
	}

	tci_cfg.vlan_id = pvid;

	/* Interface needs to be down to change PVID */
	up = dpaa2_switch_port_is_up(port_priv);
	if (up) {
		err = dpsw_if_disable(ethsw->mc_io, 0,
				      ethsw->dpsw_handle,
				      port_priv->idx);
		if (err) {
			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
			return err;
		}
	}

	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
		goto set_tci_error;
	}

	/* Delete previous PVID info and mark the new one */
	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
	port_priv->pvid = pvid;

set_tci_error:
	if (up) {
		ret = dpsw_if_enable(ethsw->mc_io, 0,
				     ethsw->dpsw_handle,
				     port_priv->idx);
		if (ret) {
			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
			return ret;
		}
	}

	return err;
}
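
/* Make a port a member of an already created VLAN, optionally marking it as
 * egress untagged and/or as the new PVID.
 */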
static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
				      u16 vid, u16 flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg = {0};
	int err;

	if (port_priv->vlans[vid]) {
		netdev_warn(netdev, "VLAN %d already configured\n", vid);
		return -EEXIST;
	}

	/* If hit, this VLAN rule will lead the packet into the FDB table
	 * specified in the VLAN configuration below
	 */
	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
		return err;
	}

	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_add_if_untagged err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
	}

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = dpaa2_switch_port_set_pvid(port_priv, vid);
		if (err)
			return err;
	}

	return 0;
}
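
/* Map a bridge STP state onto its DPSW counterpart; any unknown state falls
 * back to disabled.
 */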
static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
{
	switch (state) {
	case BR_STATE_DISABLED:
		return DPSW_STP_STATE_DISABLED;
	case BR_STATE_LISTENING:
		return DPSW_STP_STATE_LISTENING;
	case BR_STATE_LEARNING:
		return DPSW_STP_STATE_LEARNING;
	case BR_STATE_FORWARDING:
		return DPSW_STP_STATE_FORWARDING;
	case BR_STATE_BLOCKING:
		return DPSW_STP_STATE_BLOCKING;
	default:
		return DPSW_STP_STATE_DISABLED;
	}
}
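
/* DPSW keeps the STP state per VLAN, so apply the new state to every VLAN
 * the port is a member of.
 */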
static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
{
	struct dpsw_stp_cfg stp_cfg = {0};
	int err;
	u16 vid;

	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
		return 0; /* Nothing to do */

	stp_cfg.state = br_stp_state_to_dpsw(state);
	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
			stp_cfg.vlan_id = vid;
			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
					      port_priv->ethsw_data->dpsw_handle,
					      port_priv->idx, &stp_cfg);
			if (err) {
				netdev_err(port_priv->netdev,
					   "dpsw_if_set_stp err %d\n", err);
				return err;
			}
		}
	}

	port_priv->stp_state = state;

	return 0;
}
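
/* Remove a VLAN from the switch and clear its membership from all ports. */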
static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
{
	struct ethsw_port_priv *ppriv_local = NULL;
	int i, err;

	if (!ethsw->vlans[vid])
		return -ENOENT;

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = 0;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		ppriv_local = ethsw->ports[i];
		ppriv_local->vlans[vid] = 0;
	}

	return 0;
}
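
/* Install a static unicast FDB entry for this port in its FDB table. */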
static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
				   port_priv->ethsw_data->dpsw_handle,
				   fdb_id, &entry);
	if (err)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_add_unicast err %d\n", err);
	return err;
}
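
/* Remove a static unicast FDB entry previously installed for this port. */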
static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
				      port_priv->ethsw_data->dpsw_handle,
				      fdb_id, &entry);
	/* Silently discard the error when the del command is called
	 * multiple times
	 */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_unicast err %d\n", err);
	return err;
}
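
/* Install a static multicast FDB entry with this port as the only egress
 * interface.
 */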
|
|
|
|
|
2020-10-09 23:30:00 +08:00
|
|
|
static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
|
|
|
|
const unsigned char *addr)
|
2018-03-14 23:55:54 +08:00
|
|
|
{
|
|
|
|
struct dpsw_fdb_multicast_cfg entry = {0};
|
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     fdb_id, &entry);
	/* Silently discard the error when the add command is called multiple times */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
			   err);
	return err;
}

static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
					port_priv->ethsw_data->dpsw_handle,
					fdb_id, &entry);
	/* Silently discard the error when the del command is called multiple times */
	if (err && err != -ENAVAIL)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_multicast err %d\n", err);
	return err;
}

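/* Read the per-interface counters maintained by the MC firmware and
 * translate them into the standard rtnl_link_stats64 layout. Filtered
 * ingress frames are folded into rx_dropped since they never reach the CPU.
 */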
static void dpaa2_switch_port_get_stats(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	u64 tmp;
	int err;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME_DISCARD,
				  &stats->rx_dropped);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FLTR_FRAME,
				  &tmp);
	if (err)
		goto error;
	stats->rx_dropped += tmp;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME_DISCARD,
				  &stats->tx_dropped);
	if (err)
		goto error;

	return;

error:
	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
}

static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
						int attr_id)
{
	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
}

static int dpaa2_switch_port_get_offload_stats(int attr_id,
					       const struct net_device *netdev,
					       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
		return 0;
	}

	return -EINVAL;
}

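/* The firmware works with maximum L2 frame lengths, not L3 MTUs, so the
 * requested MTU is first converted with ETHSW_L2_MAX_FRM(), which presumably
 * accounts for the Ethernet header and VLAN tag overhead.
 */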
static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
					   0,
					   port_priv->ethsw_data->dpsw_handle,
					   port_priv->idx,
					   (u16)ETHSW_L2_MAX_FRM(mtu));
	if (err) {
		netdev_err(netdev,
			   "dpsw_if_set_max_frame_length() err %d\n", err);
		return err;
	}

	netdev->mtu = mtu;
	return 0;
}

static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpsw_link_state state;
	int err;

	/* Interrupts are received even though no one issued an 'ifconfig up'
	 * on the switch interface. Ignore these link state update interrupts.
	 */
	if (!netif_running(netdev))
		return 0;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return err;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	if (state.up != port_priv->link_state) {
		if (state.up) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
		port_priv->link_state = state.up;
	}

	return 0;
}

/* Manage all NAPI instances for the control interface.
 *
 * We only have one RX queue and one Tx Conf queue for all
 * switch ports. Therefore, we only need to enable the NAPI instance once, the
 * first time one of the switch ports runs .dev_open().
 */
static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* A new interface is using the NAPI instance */
	ethsw->napi_users++;

	/* If there already is another user of the instance, return */
	if (ethsw->napi_users > 1)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_enable(&ethsw->fq[i].napi);
}

static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* If we are not the last interface using the NAPI, return */
	ethsw->napi_users--;
	if (ethsw->napi_users)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_disable(&ethsw->fq[i].napi);
}

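/* .ndo_open callback: bring the HW interface up, sync the carrier state and
 * take a reference on the shared control interface NAPI instances.
 */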
static int dpaa2_switch_port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* Explicitly set carrier off, otherwise
	 * netif_carrier_ok() will return true and cause 'ip link show'
	 * to report the LOWER_UP flag, even though the link
	 * notification wasn't even received.
	 */
	netif_carrier_off(netdev);

	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
			     port_priv->ethsw_data->dpsw_handle,
			     port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
		return err;
	}

	/* Sync the carrier state */
	err = dpaa2_switch_port_carrier_state_sync(netdev);
	if (err) {
		netdev_err(netdev,
			   "dpaa2_switch_port_carrier_state_sync err %d\n", err);
		goto err_carrier_sync;
	}

	dpaa2_switch_enable_ctrl_if_napi(ethsw);

	return 0;

err_carrier_sync:
	dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			port_priv->ethsw_data->dpsw_handle,
			port_priv->idx);
	return err;
}

static int dpaa2_switch_port_stop(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			      port_priv->ethsw_data->dpsw_handle,
			      port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
		return err;
	}

	dpaa2_switch_disable_ctrl_if_napi(ethsw);

	return 0;
}

static int dpaa2_switch_port_parent_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct ethsw_port_priv *port_priv = netdev_priv(dev);

	ppid->id_len = 1;
	ppid->id[0] = port_priv->ethsw_data->dev_id;

	return 0;
}

static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
					   size_t len)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = snprintf(name, len, "p%d", port_priv->idx);
	if (err >= len)
		return -EINVAL;

	return 0;
}

struct ethsw_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

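/* Translate one dumped FDB entry into an RTM_NEWNEIGH netlink message.
 * dump->idx counts entries so that an interrupted dump can be resumed from
 * cb->args[2].
 */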
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
				    struct ethsw_dump_ctx *dump)
{
	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_dynamic ? NUD_REACHABLE : NUD_NOARP;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
					     struct ethsw_port_priv *port_priv)
{
	int idx = port_priv->idx;
	int valid;

	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		valid = entry->if_info == port_priv->idx;
	else
		valid = entry->if_mask[idx / 8] & BIT(idx % 8);

	return valid;
}

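/* Dump the whole FDB table into a DMA-mapped scratch buffer through the MC
 * firmware, then invoke the callback on each returned entry. The buffer is
 * sized for the worst case, sw_attr.max_fdb_entries.
 */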
static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
				    dpaa2_switch_fdb_cb_t cb, void *data)
{
	struct net_device *net_dev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct device *dev = net_dev->dev.parent;
	struct fdb_dump_entry *fdb_entries;
	struct fdb_dump_entry fdb_entry;
	dma_addr_t fdb_dump_iova;
	u16 num_fdb_entries;
	u32 fdb_dump_size;
	int err = 0, i;
	u8 *dma_mem;
	u16 fdb_id;

	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, fdb_dump_iova)) {
		netdev_err(net_dev, "dma_map_single() failed\n");
		err = -ENOMEM;
		goto err_map;
	}

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
	if (err) {
		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
		goto err_dump;
	}

	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);

	fdb_entries = (struct fdb_dump_entry *)dma_mem;
	for (i = 0; i < num_fdb_entries; i++) {
		fdb_entry = fdb_entries[i];

		err = cb(port_priv, &fdb_entry, data);
		if (err)
			goto end;
	}

end:
	kfree(dma_mem);

	return 0;

err_dump:
	/* The buffer was mapped DMA_FROM_DEVICE, so unmap it the same way */
	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
err_map:
	kfree(dma_mem);
	return err;
}

static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}

static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
				      struct net_device *net_dev,
				      struct net_device *filter_dev, int *idx)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_dump_ctx dump = {
		.dev = net_dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
					   struct fdb_dump_entry *fdb_entry,
					   void *data __always_unused)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);

	return 0;
}

static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
{
	dpaa2_switch_fdb_iterate(port_priv,
				 dpaa2_switch_fdb_entry_fast_age, NULL);
}

static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
				      u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_add(netdev, &vlan);
}

static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
				       u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_del(netdev, &vlan);
}

static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *net_dev = port_priv->netdev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN];
	int err;

	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
		return 0;

	/* Get firmware address, if any */
	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, mac_addr);
	if (err) {
		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	}

	return 0;
}

static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
				 const struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	unsigned char *buffer_start;
	struct sk_buff **skbh, *skb;
	dma_addr_t fd_addr;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);

	skb = *skbh;
	buffer_start = (unsigned char *)skbh;

	dma_unmap_single(dev, fd_addr,
			 skb_tail_pointer(skb) - buffer_start,
			 DMA_TO_DEVICE);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

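/* Lay out a single-buffer Tx frame descriptor. The area in front of the
 * packet data holds the software annotation (with a backpointer to the skb)
 * followed by the hardware annotation, whose first 8 bytes (the frame
 * annotation status) are cleared so that Tx confirmation sees consistent
 * values.
 */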
static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
					struct sk_buff *skb,
					struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	struct sk_buff **skbh;
	dma_addr_t addr;
	u8 *buff_start;
	void *hwa;

	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
			       DPAA2_SWITCH_TX_BUF_ALIGN,
			       DPAA2_SWITCH_TX_BUF_ALIGN);

	/* Clear FAS to have consistent values for TX confirmation. It is
	 * located in the first 8 bytes of the buffer's hardware annotation
	 * area
	 */
	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
	memset(hwa, 0, 8);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	addr = dma_map_single(dev, buff_start,
			      skb_tail_pointer(skb) - buff_start,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);

	return 0;
}

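/* .ndo_start_xmit callback. Since the enqueue portal may be temporarily
 * busy, the enqueue is retried up to DPAA2_SWITCH_SWP_BUSY_RETRIES times
 * before the frame is dropped.
 */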
static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
					struct net_device *net_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
	struct dpaa2_fd fd;
	int err;

	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
		if (unlikely(!ns)) {
			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
			goto err_free_skb;
		}
		dev_consume_skb_any(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx confirmation */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
		goto err_exit;
	}

	/* At this stage, we do not support non-linear skbs so just try to
	 * linearize the skb and if that's not working, just drop the packet.
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}

static const struct net_device_ops dpaa2_switch_port_ops = {
	.ndo_open		= dpaa2_switch_port_open,
	.ndo_stop		= dpaa2_switch_port_stop,

	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= dpaa2_switch_port_get_stats,
	.ndo_change_mtu		= dpaa2_switch_port_change_mtu,
	.ndo_has_offload_stats	= dpaa2_switch_port_has_offload_stats,
	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
	.ndo_vlan_rx_add_vid	= dpaa2_switch_port_vlan_add,
	.ndo_vlan_rx_kill_vid	= dpaa2_switch_port_vlan_kill,

	.ndo_start_xmit		= dpaa2_switch_port_tx,
	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
};

bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
{
	return netdev->netdev_ops == &dpaa2_switch_port_ops;
}

static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
		dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
	}
}

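/* Threaded handler for the DPSW interrupt: read and clear the pending
 * events, and resync the link state (and firmware-provided MAC addresses)
 * of all switch ports on a link change.
 */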
static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);

	/* Mask the events and the if_id reserved bits to be cleared on read */
	u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
	int err;

	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, &status);
	if (err) {
		dev_err(dev, "Can't get irq status (err %d)\n", err);

		err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
					    DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
		if (err)
			dev_err(dev, "Can't clear irq status (err %d)\n", err);
		goto out;
	}

	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
		dpaa2_switch_links_state_update(ethsw);

out:
	return IRQ_HANDLED;
}

static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(sw_dev);
	if (err) {
		dev_err(dev, "MC irqs allocation failed\n");
		return err;
	}

	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
		err = -EINVAL;
		goto free_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
		goto free_irq;
	}

	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];

	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
					NULL,
					dpaa2_switch_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(dev), dev);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_irq;
	}

	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
				DPSW_IRQ_INDEX_IF, mask);
	if (err) {
		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
		goto free_devm_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 1);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
		goto free_devm_irq;
	}

	return 0;

free_devm_irq:
	devm_free_irq(dev, irq->msi_desc->irq, dev);
free_irq:
	fsl_mc_free_irqs(sw_dev);
	return err;
}

static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);

	fsl_mc_free_irqs(sw_dev);
}

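/* Toggle hardware address learning on a port. When learning is turned off,
 * the dynamically learned addresses are also flushed so that stale entries
 * do not keep steering traffic.
 */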
static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	enum dpsw_learning_mode learn_mode;
	int err;

	if (enable)
		learn_mode = DPSW_LEARNING_MODE_HW;
	else
		learn_mode = DPSW_LEARNING_MODE_DIS;

	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, learn_mode);
	if (err)
		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);

	if (!enable)
		dpaa2_switch_port_fast_age(port_priv);

	return err;
}

static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
						u8 state)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpaa2_switch_port_set_stp_state(port_priv, state);
	if (err)
		return err;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		err = dpaa2_switch_port_set_learning(port_priv, false);
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		err = dpaa2_switch_port_set_learning(port_priv,
						     port_priv->learn_ena);
		break;
	}

	return err;
}

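/* Cache the requested broadcast/unknown-unicast flood settings and
 * reprogram the egress flood domains of the FDB this port is part of.
 */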
static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
				   struct switchdev_brport_flags flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (flags.mask & BR_BCAST_FLOOD)
		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);

	if (flags.mask & BR_FLOOD)
		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);

	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
}

static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
					      struct switchdev_brport_flags flags,
					      struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
			   BR_MCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot configure multicast flooding independently of unicast");
			return -EINVAL;
		}
	}

	return 0;
}

static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
					  struct switchdev_brport_flags flags,
					  struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
		if (err)
			return err;
		port_priv->learn_ena = learn_ena;
	}

	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
		err = dpaa2_switch_port_flood(port_priv, flags);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_switch_port_attr_set(struct net_device *netdev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
|
|
|
err = dpaa2_switch_port_attr_stp_state_set(netdev,
|
2020-10-09 23:30:00 +08:00
|
|
|
attr->u.stp_state);
|
2018-03-14 23:55:54 +08:00
|
|
|
break;
|
|
|
|
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
|
2021-03-10 20:14:49 +08:00
|
|
|
if (!attr->u.vlan_filtering) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack,
|
|
|
|
"The DPAA2 switch does not support VLAN-unaware operation");
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
2018-03-14 23:55:54 +08:00
|
|
|
break;
|
2021-03-23 04:58:56 +08:00
|
|
|
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
|
|
|
|
err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
|
|
|
|
break;
|
|
|
|
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
|
|
|
|
err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
|
|
|
|
break;
|
2018-03-14 23:55:54 +08:00
|
|
|
default:
|
|
|
|
err = -EOPNOTSUPP;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
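
/* Illustrative user space sequence (device names and VID hypothetical)
 * that reaches this handler through a SWITCHDEV_OBJ_ID_PORT_VLAN object:
 *
 *	ip link add name br0 type bridge vlan_filtering 1
 *	ip link set dev ethsw0p1 master br0
 *	bridge vlan add dev ethsw0p1 vid 100 pvid untagged
 *
 * Re-adding a VLAN that the port is already a member of returns -EEXIST
 * instead of silently succeeding.
 */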
int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_attr *attr = &ethsw->sw_attr;
	int err = 0;

	/* Make sure that the VLAN is not already configured
	 * on the switch port
	 */
	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
		return -EEXIST;

	/* Check if there is space for a new VLAN */
	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
		return err;
	}
	if (attr->max_vlans - attr->num_vlans < 1)
		return -ENOSPC;

	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
		/* this is a new VLAN */
		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
		if (err)
			return err;

		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
	}

	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}
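
/* Look up @addr in the netdev's unicast (@is_uc != 0) or multicast
 * address list. Returns 1 if the address is found and 0 otherwise; the
 * walk is done under the address list lock so it cannot race with
 * concurrent list updates.
 */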
static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
					    const unsigned char *addr)
{
	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	list_for_each_entry(ha, &list->list, list) {
		if (ether_addr_equal(ha->addr, addr)) {
			netif_addr_unlock_bh(netdev);
			return 1;
		}
	}
	netif_addr_unlock_bh(netdev);

	return 0;
}
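
/* Offload an MDB entry. The multicast address is programmed into the
 * hardware FDB first and only then added to the netdev's mc list, so a
 * dev_mc_add() failure can be unwound by removing the freshly added
 * hardware entry.
 */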
static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* Check if address is already set on this port */
	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}
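
/* Switchdev object dispatch for additions. An MDB object can be
 * generated, for instance (bridge and port names hypothetical), by:
 *
 *	bridge mdb add dev br0 port ethsw0p1 grp 239.10.10.10 permanent
 */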
static int dpaa2_switch_port_obj_add(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_add(netdev,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_add(netdev,
						SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
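
/* Remove one port's membership in @vid. The teardown order is: drop the
 * PVID if @vid holds it (parking it on VLAN 4095, which neither the
 * bridge nor the 8021q module will use), clear the untagged attribute,
 * then remove the membership itself. When the last member port is gone,
 * the VLAN is also deleted from the switch as a whole.
 */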
static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		/* If we are deleting the PVID of a port, use VLAN 4095 instead
		 * as we are sure that neither the bridge nor the 8021q module
		 * will use it
		 */
		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
	}

	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;

		/* Delete VLAN from switch if it is no longer configured on
		 * any port
		 */
		for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
				return 0; /* Found a port member in VID */

		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;

		err = dpaa2_switch_dellink(ethsw, vid);
		if (err)
			return err;
	}

	return 0;
}
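
/* VLANs configured on the bridge device itself (orig_dev is the bridge
 * master) have no hardware equivalent here, so they are left entirely to
 * the software bridge.
 */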
int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}
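
/* Mirror of dpaa2_switch_port_mdb_add(): the hardware FDB entry is
 * removed first, then the address is dropped from the netdev's mc list.
 */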
static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -ENOENT;

	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_del(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_del err %d\n", err);
		return err;
	}

	return err;
}
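
/* Switchdev object dispatch for deletions, the counterpart of
 * dpaa2_switch_port_obj_add().
 */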
|
|
|
|
|
2020-10-09 23:30:00 +08:00
|
|
|
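
/* Dispatch switchdev object deletions to the VLAN or MDB specific handler */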
static int dpaa2_switch_port_obj_del(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
					    struct switchdev_notifier_port_attr_info *ptr)
{
	int err;

	err = switchdev_handle_port_attr_set(netdev, ptr,
					     dpaa2_switch_port_dev_check,
					     dpaa2_switch_port_attr_set);
	return notifier_from_errno(err);
}
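
/* Called on bridge join: sanity check the upper device, move the port to
 * the bridge's FDB table, inherit the learning state and rebuild the
 * egress flooding domain
 */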
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
					 struct net_device *upper_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct ethsw_port_priv *other_port_priv;
	struct net_device *other_dev;
	struct list_head *iter;
	bool learn_ena;
	int err;

	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		other_port_priv = netdev_priv(other_dev);
		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
			netdev_err(netdev,
				   "Interface from a different DPSW is in the bridge already!\n");
			return -EINVAL;
		}
	}

	/* Delete the previously manually installed VLAN 1 */
	err = dpaa2_switch_port_del_vlan(port_priv, 1);
	if (err)
		return err;

	dpaa2_switch_port_set_fdb(port_priv, upper_dev);

	/* Inherit the initial bridge port learning state */
	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
	port_priv->learn_ena = learn_ena;

	/* Setup the egress flood policy (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		goto err_egress_flood;

	return 0;

err_egress_flood:
	dpaa2_switch_port_set_fdb(port_priv, NULL);
	return err;
}
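
/* vlan_for_each() callbacks used on bridge leave to move the port's RX
 * VLAN filters from the old FDB table to the new one
 */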
static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
}

static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
}
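
/* Called on bridge leave: fast age the learned addresses, fall back to a
 * private FDB table and restore the standalone flooding, learning and
 * default VLAN configuration
 */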
static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* First of all, fast age any learned FDB addresses on this switch port */
	dpaa2_switch_port_fast_age(port_priv);

	/* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
	 * upper devices or otherwise from the FDB table that we are about to
	 * leave
	 */
	err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);

	dpaa2_switch_port_set_fdb(port_priv, NULL);

	/* Restore all RX VLANs into the new FDB table that we just joined */
	err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);

	/* Reset the flooding state to denote that this port can send any
	 * packet in standalone mode. With this, we are also ensuring that any
	 * later bridge join will have the flooding flag on.
	 */
	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Setup the egress flood policy (broadcast, unknown unicast).
	 * When the port is not under a bridge, only the CTRL interface is part
	 * of the flooding domain besides the actual port
	 */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Recreate the egress flood domain of the FDB that we just left */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
	if (err)
		return err;

	/* No HW learning when not under a bridge */
	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		return err;
	port_priv->learn_ena = false;

	/* Add the VLAN 1 as PVID when not under a bridge. We need this since
	 * the dpaa2 switch interfaces cannot operate VLAN unaware
	 */
	return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
					  BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
}

static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	/* RCU read lock not necessary because we have write-side protection
	 * (rtnl_mutex), however a non-rcu iterator does not exist.
	 */
	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
		if (is_vlan_dev(upper_dev))
			return -EOPNOTSUPP;

	return 0;
}
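
/* Netdevice notifier: validate bridge join requests at PRECHANGEUPPER and
 * reconfigure the switching domain at CHANGEUPPER
 */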
static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			break;

		if (!br_vlan_enabled(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
			err = -EOPNOTSUPP;
			goto out;
		}

		err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot join a bridge while VLAN uppers are present");
			goto out;
		}

		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
			else
				err = dpaa2_switch_port_bridge_leave(netdev);
		}
		break;
	}

out:
	return notifier_from_errno(err);
}
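
/* FDB notifications arrive in atomic context, so the MC firmware commands
 * are deferred to this work item and run with the rtnl lock held
 */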
struct ethsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void dpaa2_switch_event_work(struct work_struct *work)
{
	struct ethsw_switchdev_event_work *switchdev_work =
		container_of(work, struct ethsw_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	int err;

	rtnl_lock();
	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
							   fdb_info->addr);
		else
			err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
							   fdb_info->addr);
		if (err)
			break;
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info, NULL);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
		else
			dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

/* Called under rcu_read_lock() */
static int dpaa2_switch_port_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct ethsw_port_priv *port_priv = netdev_priv(dev);
	struct ethsw_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (event == SWITCHDEV_PORT_ATTR_SET)
		return dpaa2_switch_port_attr_set_event(dev, ptr);

	if (!dpaa2_switch_port_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);

		/* Take a reference on the device to avoid it being freed. */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(ethsw->workqueue, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
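
/* Common handler for the switchdev object add/del notifications */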
static int dpaa2_switch_port_obj_event(unsigned long event,
				       struct net_device *netdev,
				       struct switchdev_notifier_port_obj_info *port_obj_info)
{
	int err = -EOPNOTSUPP;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
		break;
	}

	port_obj_info->handled = true;
	return notifier_from_errno(err);
}

static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
					    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
		return dpaa2_switch_port_obj_event(event, dev, ptr);
	case SWITCHDEV_PORT_ATTR_SET:
		return dpaa2_switch_port_attr_set_event(dev, ptr);
	}

	return NOTIFY_DONE;
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
						     const struct dpaa2_fd *fd)
{
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);
	struct device *dev = ethsw->dev;
	struct sk_buff *skb = NULL;
	void *fd_vaddr;

	fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
	dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
		       DMA_FROM_DEVICE);

	skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (unlikely(!skb)) {
		dev_err(dev, "build_skb() failed\n");
		return NULL;
	}

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	ethsw->buf_count--;

	return skb;
}

static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
				 const struct dpaa2_fd *fd)
{
	dpaa2_switch_free_fd(fq->ethsw, fd);
}
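
/* Rx path of the control interface: identify the ingress switch port from
 * the frame descriptor, pop the PVID VLAN tag and pass the skb up the stack
 */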
static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
			    const struct dpaa2_fd *fd)
{
	struct ethsw_core *ethsw = fq->ethsw;
	struct ethsw_port_priv *port_priv;
	struct net_device *netdev;
	struct vlan_ethhdr *hdr;
	struct sk_buff *skb;
	u16 vlan_tci, vid;
	int if_id, err;

	/* get switch ingress interface ID */
	if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;

	if (if_id >= ethsw->sw_attr.num_ifs) {
		dev_err(ethsw->dev, "Frame received from unknown interface!\n");
		goto err_free_fd;
	}
	port_priv = ethsw->ports[if_id];
	netdev = port_priv->netdev;

	/* build the SKB based on the FD received */
	if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
		if (net_ratelimit()) {
			netdev_err(netdev, "Received invalid frame format\n");
			goto err_free_fd;
		}
	}

	skb = dpaa2_switch_build_linear_skb(ethsw, fd);
	if (unlikely(!skb))
		goto err_free_fd;

	skb_reset_mac_header(skb);

	/* Remove the VLAN header if the packet that we just received has a vid
	 * equal to the port's PVID. Since the dpaa2-switch can operate only in
	 * VLAN-aware mode and no alterations are made on the packet when it's
	 * redirected/mirrored to the control interface, we are sure that there
	 * will always be a VLAN header present.
	 */
	hdr = vlan_eth_hdr(skb);
	vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
	if (vid == port_priv->pvid) {
		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err) {
			dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err);
			goto err_free_fd;
		}
	}

	skb->dev = netdev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	/* Setup the offload_fwd_mark only if the port is under a bridge */
	skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);

	netif_receive_skb(skb);

	return;

err_free_fd:
	dpaa2_switch_free_fd(ethsw, fd);
}

static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
{
	ethsw->features = 0;

	if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
		ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
}
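
/* Cache the frame queue IDs (Rx and Tx confirmation) of the control
 * interface, as reported by the MC firmware
 */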
static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_attr ctrl_if_attr;
	struct device *dev = ethsw->dev;
	int i = 0;
	int err;

	err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  &ctrl_if_attr);
	if (err) {
		dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
		return err;
	}

	ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
	ethsw->fq[i].ethsw = ethsw;
	ethsw->fq[i++].type = DPSW_QUEUE_RX;

	ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
	ethsw->fq[i].ethsw = ethsw;
	ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;

	return 0;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
{
	struct device *dev = ethsw->dev;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
			       DMA_FROM_DEVICE);
		free_pages((unsigned long)vaddr, 0);
	}
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
{
	struct device *dev = ethsw->dev;
	u64 buf_array[BUFS_PER_CMD];
	struct page *page;
	int retries = 0;
	dma_addr_t addr;
	int err;
	int i;

	for (i = 0; i < BUFS_PER_CMD; i++) {
		/* Allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page) {
			dev_err(dev, "buffer allocation failed\n");
			goto err_alloc;
		}

		addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, addr)) {
			dev_err(dev, "dma_map_page() failed\n");
			goto err_map;
		}
		buf_array[i] = addr;
	}

release_bufs:
	/* In case the portal is busy, retry until successful or
	 * max retries hit.
	 */
	while ((err = dpaa2_io_service_release(NULL, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
			break;

		cpu_relax();
	}

	/* If release command failed, clean up and bail out. */
	if (err) {
		dpaa2_switch_free_bufs(ethsw, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}
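
/* Refill the buffer pool from the Rx path once it drops below the refill
 * threshold; returns -ENOMEM if the nominal fill level cannot be reached
 */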
static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
{
	int *count = &ethsw->buf_count;
	int new_count;
	int err = 0;

	if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
		do {
			new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
			if (unlikely(!new_count)) {
				/* Out of memory; abort for now, we'll
				 * try later on
				 */
				break;
			}
			*count += new_count;
		} while (*count < DPAA2_ETHSW_NUM_BUFS);

		if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
			err = -ENOMEM;
	}

	return err;
}

static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
{
	int *count, i;

	for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
		count = &ethsw->buf_count;
		*count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);

		if (unlikely(*count < BUFS_PER_CMD))
			return -ENOMEM;
	}

	return 0;
}

static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
{
	u64 buf_array[BUFS_PER_CMD];
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
					       buf_array, BUFS_PER_CMD);
		if (ret < 0) {
			dev_err(ethsw->dev,
				"dpaa2_io_service_acquire() = %d\n", ret);
			return;
		}
		dpaa2_switch_free_bufs(ethsw, buf_array, ret);

	} while (ret);
}
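
/* Allocate, enable and configure the DPBP object that backs the control
 * interface buffer pool, then attach it to the DPSW
 */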
static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
	struct device *dev = ethsw->dev;
	struct fsl_mc_device *dpbp_dev;
	struct dpbp_attr dpbp_attrs;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}
	ethsw->dpbp_dev = dpbp_dev;

	err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
	dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
	dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
	dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;

	err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     &dpsw_ctrl_if_pools_cfg);
	if (err) {
		dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
		goto err_get_attr;
	}
	ethsw->bpid = dpbp_attrs.id;

	return 0;

err_get_attr:
	dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);
	return err;
}

static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
{
	dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
	dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
	fsl_mc_object_free(ethsw->dpbp_dev);
}

static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		ethsw->fq[i].store =
			dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
					      ethsw->dev);
		if (!ethsw->fq[i].store) {
			dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
			while (--i >= 0)
				dpaa2_io_store_destroy(ethsw->fq[i].store);
			return -ENOMEM;
		}
	}

	return 0;
}

static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_store_destroy(ethsw->fq[i].store);
}

static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
{
	int err, retries = 0;

	/* Try to pull from the FQ while the portal is busy and we didn't hit
	 * the maximum number of retries
	 */
	do {
		err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

	if (unlikely(err))
		dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}

/* Consume all frames pull-dequeued into the store */
static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
{
	struct ethsw_core *ethsw = fq->ethsw;
	int cleaned = 0, is_last;
	struct dpaa2_dq *dq;
	int retries = 0;

	do {
		/* Get the next available FD from the store */
		dq = dpaa2_io_store_next(fq->store, &is_last);
		if (unlikely(!dq)) {
			if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
				dev_err_once(ethsw->dev,
					     "No valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		if (fq->type == DPSW_QUEUE_RX)
			dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
		else
			dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
		cleaned++;

	} while (!is_last);

	return cleaned;
}

/* NAPI poll routine */
static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
{
	int err, cleaned = 0, store_cleaned, work_done;
	struct dpaa2_switch_fq *fq;
	int retries = 0;

	fq = container_of(napi, struct dpaa2_switch_fq, napi);

	do {
		err = dpaa2_switch_pull_fq(fq);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_switch_refill_bp(fq->ethsw);

		store_cleaned = dpaa2_switch_store_consume(fq);
		cleaned += store_cleaned;

		if (cleaned >= budget) {
			work_done = budget;
			goto out;
		}

	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and re-enable
	 * data availability notifications
	 */
	napi_complete_done(napi, cleaned);
	do {
		err = dpaa2_io_service_rearm(NULL, &fq->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

	work_done = max(cleaned, 1);
out:
	return work_done;
}
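
/* FQ data available notification callback: schedule NAPI on the queue */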
static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_switch_fq *fq;

	fq = container_of(nctx, struct dpaa2_switch_fq, nctx);

	napi_schedule(&fq->napi);
}
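
/* Register a DPIO notification context for each control interface queue
 * and point the firmware-side queue configuration at it
 */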
static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_queue_cfg queue_cfg;
	struct dpaa2_io_notification_ctx *nctx;
	int err, i, j;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		nctx = &ethsw->fq[i].nctx;

		/* Register a new software context for the FQID.
		 * By using NULL as the first parameter, we specify that we do
		 * not care on which CPU the interrupts for this queue are
		 * received
		 */
		nctx->is_cdan = 0;
		nctx->id = ethsw->fq[i].fqid;
		nctx->desired_cpu = DPAA2_IO_ANY_CPU;
		nctx->cb = dpaa2_switch_fqdan_cb;
		err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
		if (err) {
			err = -EPROBE_DEFER;
			goto err_register;
		}

		queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
				    DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
		queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
		queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
		queue_cfg.dest_cfg.priority = 0;
		queue_cfg.user_ctx = nctx->qman64;

		err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     ethsw->fq[i].type,
					     &queue_cfg);
		if (err)
			goto err_set_queue;
	}

	return 0;

err_set_queue:
	dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
err_register:
	for (j = 0; j < i; j++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
					    ethsw->dev);

	return err;
}

static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
					    ethsw->dev);
}
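
/* Bring up the control interface: FQs, buffer pool, stores and DPIO
 * notifications, then enable it; each step is unwound on error
 */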
2021-03-10 20:14:41 +08:00
|
|
|
static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/* setup FQs for Rx and Tx Conf */
|
|
|
|
err = dpaa2_switch_setup_fqs(ethsw);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2021-03-10 20:14:42 +08:00
|
|
|
/* setup the buffer pool needed on the Rx path */
|
|
|
|
err = dpaa2_switch_setup_dpbp(ethsw);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2021-03-10 20:14:44 +08:00
|
|
|
err = dpaa2_switch_seed_bp(ethsw);
|
2021-03-10 20:14:42 +08:00
|
|
|
if (err)
|
|
|
|
goto err_free_dpbp;
|
|
|
|
|
2021-03-10 20:14:44 +08:00
|
|
|
err = dpaa2_switch_alloc_rings(ethsw);
|
|
|
|
if (err)
|
|
|
|
goto err_drain_dpbp;
|
|
|
|
|
2021-03-10 20:14:43 +08:00
|
|
|
err = dpaa2_switch_setup_dpio(ethsw);
|
|
|
|
if (err)
|
|
|
|
goto err_destroy_rings;
|
|
|
|
|
2021-03-10 20:14:46 +08:00
|
|
|
err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
|
|
|
|
if (err) {
|
|
|
|
dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
|
|
|
|
goto err_deregister_dpio;
|
|
|
|
}
|
|
|
|
|
2021-03-10 20:14:41 +08:00
|
|
|
return 0;
|
2021-03-10 20:14:42 +08:00
|
|
|
|
2021-03-10 20:14:46 +08:00
|
|
|
err_deregister_dpio:
|
|
|
|
dpaa2_switch_free_dpio(ethsw);
|
2021-03-10 20:14:43 +08:00
|
|
|
err_destroy_rings:
|
|
|
|
dpaa2_switch_destroy_rings(ethsw);
|
2021-03-10 20:14:44 +08:00
|
|
|
err_drain_dpbp:
|
|
|
|
dpaa2_switch_drain_bp(ethsw);
|
2021-03-10 20:14:42 +08:00
|
|
|
err_free_dpbp:
|
|
|
|
dpaa2_switch_free_dpbp(ethsw);
|
|
|
|
|
|
|
|
return err;
|
2021-03-10 20:14:41 +08:00
|
|
|
}
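
/* One-time DPSW initialization: open the object, check that the firmware
 * API version is recent enough, reset the switch and undo the default
 * VLAN 1 configuration on every port (VLAN 1 is re-added per port as the
 * PVID in dpaa2_switch_port_init()), then allocate the ordered workqueue,
 * drop the default FDB table and bring up the control interface.
 */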
static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	struct dpsw_vlan_if_cfg vcfg = {0};
	struct dpsw_tci_cfg tci_cfg = {0};
	struct dpsw_stp_cfg stp_cfg;
	int err;
	u16 i;

	ethsw->dev_id = sw_dev->obj_desc.id;

	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_open err %d\n", err);
		return err;
	}

	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		dev_err(dev, "dpsw_get_attributes err %d\n", err);
		goto err_close;
	}

	err = dpsw_get_api_version(ethsw->mc_io, 0,
				   &ethsw->major,
				   &ethsw->minor);
	if (err) {
		dev_err(dev, "dpsw_get_api_version err %d\n", err);
		goto err_close;
	}

	/* Minimum supported DPSW version check */
	if (ethsw->major < DPSW_MIN_VER_MAJOR ||
	    (ethsw->major == DPSW_MIN_VER_MAJOR &&
	     ethsw->minor < DPSW_MIN_VER_MINOR)) {
		dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
			ethsw->major, ethsw->minor);
		err = -EOPNOTSUPP;
		goto err_close;
	}

	if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
		err = -EOPNOTSUPP;
		goto err_close;
	}

	dpaa2_switch_detect_features(ethsw);

	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_reset err %d\n", err);
		goto err_close;
	}

	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
	stp_cfg.state = DPSW_STP_STATE_FORWARDING;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
		if (err) {
			dev_err(dev, "dpsw_if_disable err %d\n", err);
			goto err_close;
		}

		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
				      &stp_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
				err, i);
			goto err_close;
		}

		/* Switch starts with all ports configured to VLAN 1. Need to
		 * remove this setting to allow configuration at bridge join
		 */
		vcfg.num_ifs = 1;
		vcfg.if_id[0] = i;
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
						   DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
				err);
			goto err_close;
		}

		tci_cfg.vlan_id = 4095;
		err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_tci err %d\n", err);
			goto err_close;
		}

		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
			goto err_close;
		}
	}

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
	if (err) {
		dev_err(dev, "dpsw_vlan_remove err %d\n", err);
		goto err_close;
	}

	ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
						   WQ_MEM_RECLAIM, "ethsw",
						   ethsw->sw_attr.id);
	if (!ethsw->workqueue) {
		err = -ENOMEM;
		goto err_close;
	}

	err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
	if (err)
		goto err_destroy_ordered_workqueue;

	err = dpaa2_switch_ctrl_if_setup(ethsw);
	if (err)
		goto err_destroy_ordered_workqueue;

	return 0;

err_destroy_ordered_workqueue:
	destroy_workqueue(ethsw->workqueue);

err_close:
	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	return err;
}

/* Add an ACL entry to redirect frames with a specific destination MAC
 * address to the control interface
 */
static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
					   const char *mac)
{
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_entry_cfg acl_entry_cfg;
	struct dpsw_acl_fields *acl_h;
	struct dpsw_acl_fields *acl_m;
	struct dpsw_acl_key acl_key;
	struct device *dev;
	u8 *cmd_buff;
	int err;

	dev = port_priv->netdev->dev.parent;
	acl_h = &acl_key.match;
	acl_m = &acl_key.mask;

	if (port_priv->acl_num_rules >= DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) {
		netdev_err(netdev, "ACL full\n");
		return -ENOMEM;
	}

	memset(&acl_entry_cfg, 0, sizeof(acl_entry_cfg));
	memset(&acl_key, 0, sizeof(acl_key));

	/* Match on the destination MAC address */
	ether_addr_copy(acl_h->l2_dest_mac, mac);
	eth_broadcast_addr(acl_m->l2_dest_mac);

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(&acl_key, cmd_buff);

	acl_entry_cfg.precedence = port_priv->acl_num_rules;
	acl_entry_cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
	acl_entry_cfg.key_iova = dma_map_single(dev, cmd_buff,
						DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg.key_iova))) {
		netdev_err(netdev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(port_priv->ethsw_data->mc_io, 0,
				 port_priv->ethsw_data->dpsw_handle,
				 port_priv->acl_tbl, &acl_entry_cfg);

	/* Unmap with the same size that was mapped and free the command
	 * buffer; it is only needed while the firmware parses the key.
	 */
	dma_unmap_single(dev, acl_entry_cfg.key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	kfree(cmd_buff);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_entry() failed %d\n", err);
		return err;
	}

	port_priv->acl_num_rules++;

	return 0;
}
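
/* Per-port initialization: cache the Tx qdid, create a private FDB table
 * for the port's switching domain, install VLAN 1 as the PVID, program the
 * egress flooding domain and create the port's ACL table, which is then
 * used to trap the STP group address (01:80:C2:00:00:00) to the CPU.
 */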
static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
{
	const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = DEFAULT_VLAN_ID,
		.flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
	};
	struct net_device *netdev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_fdb_cfg fdb_cfg = {0};
	struct dpsw_acl_if_cfg acl_if_cfg;
	struct dpsw_if_attr dpsw_if_attr;
	struct dpaa2_switch_fdb *fdb;
	struct dpsw_acl_cfg acl_cfg;
	u16 fdb_id;
	int err;

	/* Get the Tx queue for this specific port */
	err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     port_priv->idx, &dpsw_if_attr);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
		return err;
	}
	port_priv->tx_qdid = dpsw_if_attr.qdid;

	/* Create a FDB table for this particular switch port */
	fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
	err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
			   &fdb_id, &fdb_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
		return err;
	}

	/* Find an unused dpaa2_switch_fdb structure and use it */
	fdb = dpaa2_switch_fdb_get_unused(ethsw);
	fdb->fdb_id = fdb_id;
	fdb->in_use = true;
	fdb->bridge_dev = NULL;
	port_priv->fdb = fdb;

	/* We need to add VLAN 1 as the PVID on this port until it is under a
	 * bridge since the DPAA2 switch is not able to handle the traffic in
	 * a VLAN unaware fashion
	 */
	err = dpaa2_switch_port_vlans_add(netdev, &vlan);
	if (err)
		return err;

	/* Setup the egress flooding domains (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Create an ACL table to be used by this switch port */
	acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
	err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
			   &port_priv->acl_tbl, &acl_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add err %d\n", err);
		return err;
	}

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->acl_tbl, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		dpsw_acl_remove(ethsw->mc_io, 0, ethsw->dpsw_handle,
				port_priv->acl_tbl);
		return err;
	}

	err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
	if (err)
		return err;

	return 0;
}

static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err)
		dev_warn(dev, "dpsw_close err %d\n", err);
}
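
/* Teardown counterpart of dpaa2_switch_ctrl_if_setup(): disable the control
 * interface, then release the DPIO contexts, rings and buffer pool.
 */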
static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
{
	dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	dpaa2_switch_free_dpio(ethsw);
	dpaa2_switch_destroy_rings(ethsw);
	dpaa2_switch_drain_bp(ethsw);
	dpaa2_switch_free_dpbp(ethsw);
}

static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
	struct ethsw_port_priv *port_priv;
	struct ethsw_core *ethsw;
	struct device *dev;
	int i;

	dev = &sw_dev->dev;
	ethsw = dev_get_drvdata(dev);

	dpaa2_switch_ctrl_if_teardown(ethsw);

	dpaa2_switch_teardown_irqs(sw_dev);

	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		port_priv = ethsw->ports[i];
		unregister_netdev(port_priv->netdev);
		free_netdev(port_priv->netdev);
	}

	kfree(ethsw->fdbs);
	kfree(ethsw->ports);

	dpaa2_switch_takedown(sw_dev);

	destroy_workqueue(ethsw->workqueue);

	fsl_mc_portal_free(ethsw->mc_io);

	kfree(ethsw);

	dev_set_drvdata(dev, NULL);

	return 0;
}
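
/* Allocate and populate the net_device of a single switch port. The netdev
 * is intentionally not registered here; registration happens at the end of
 * dpaa2_switch_probe(), once the switch can actually pass traffic.
 */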
static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
				   u16 port_idx)
{
	struct ethsw_port_priv *port_priv;
	struct device *dev = ethsw->dev;
	struct net_device *port_netdev;
	int err;

	port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
	if (!port_netdev) {
		dev_err(dev, "alloc_etherdev error\n");
		return -ENOMEM;
	}

	port_priv = netdev_priv(port_netdev);
	port_priv->netdev = port_netdev;
	port_priv->ethsw_data = ethsw;

	port_priv->idx = port_idx;
	port_priv->stp_state = BR_STATE_FORWARDING;

	SET_NETDEV_DEV(port_netdev, dev);
	port_netdev->netdev_ops = &dpaa2_switch_port_ops;
	port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;

	port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;

	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Set MTU limits */
	port_netdev->min_mtu = ETH_MIN_MTU;
	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;

	/* Populate the private port structure so that later calls to
	 * dpaa2_switch_port_init() can use it.
	 */
	ethsw->ports[port_idx] = port_priv;

	/* The DPAA2 switch's ingress path depends on the VLAN table,
	 * thus we are not able to disable VLAN filtering.
	 */
	port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER;

	err = dpaa2_switch_port_init(port_priv, port_idx);
	if (err)
		goto err_port_probe;

	err = dpaa2_switch_port_set_mac_addr(port_priv);
	if (err)
		goto err_port_probe;

	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		goto err_port_probe;
	port_priv->learn_ena = false;

	return 0;

err_port_probe:
	free_netdev(port_netdev);
	ethsw->ports[port_idx] = NULL;

	return err;
}
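
/* Top-level probe: allocate the core structure and an MC portal, initialize
 * the DPSW object, probe every switch port, attach NAPI instances to the Rx
 * queues, enable the switch and its IRQs, and only then register the port
 * net_devices.
 */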
|
|
|
|
|
2020-10-09 23:30:00 +08:00
|
|
|
static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
|
2018-03-14 23:55:54 +08:00
|
|
|
{
|
|
|
|
struct device *dev = &sw_dev->dev;
|
|
|
|
struct ethsw_core *ethsw;
|
|
|
|
int i, err;
|
|
|
|
|
|
|
|
/* Allocate switch core*/
|
|
|
|
ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!ethsw)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ethsw->dev = dev;
|
2021-03-10 20:14:44 +08:00
|
|
|
ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
|
2018-03-14 23:55:54 +08:00
|
|
|
dev_set_drvdata(dev, ethsw);
|
|
|
|
|
2019-07-05 22:27:12 +08:00
|
|
|
err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
|
|
|
|
ðsw->mc_io);
|
2018-03-14 23:55:54 +08:00
|
|
|
if (err) {
|
|
|
|
if (err == -ENXIO)
|
|
|
|
err = -EPROBE_DEFER;
|
|
|
|
else
|
|
|
|
dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
|
|
|
|
goto err_free_drvdata;
|
|
|
|
}
|
|
|
|
|
2020-10-09 23:30:00 +08:00
|
|
|
err = dpaa2_switch_init(sw_dev);
|
2018-03-14 23:55:54 +08:00
|
|
|
if (err)
|
|
|
|
goto err_free_cmdport;
|
|
|
|
|
|
|
|
ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!(ethsw->ports)) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_takedown;
|
|
|
|
}
|
|
|
|
|
staging: dpaa2-switch: properly setup switching domains
Until now, the DPAA2 switch was not capable to properly setup its
switching domains depending on the existence, or lack thereof, of a
upper bridge device. This meant that all switch ports of a DPSW object
were switching by default even though they were not under the same
bridge device.
Another issue was the inability to actually add the CPU in the flooding
domains (broadcast, unknown unicast etc) of a particular switch port.
This meant that a simple ping on a switch interface was not possible
since no broadcast ARP frame would actually reach the CPU queues.
This patch tries to fix exactly these problems by:
* Creating and managing a FDB table for each flooding domain. This means
that when a switch interface is not bridged it will use its own FDB
table. While in bridged mode all DPAA2 switch interfaces under the
same upper will use the same FDB table, thus leverage the same FDB
entries.
* Adding a new MC firmware command - dpsw_set_egress_flood() - through
which the driver can setup the flooding domains as needed. For
example, when the switch interface is standalone, thus not in a
bridge with any other DPAA2 switch port, it will setup its broadcast
and unknown unicast flooding domains to only include the control
interface (the queues that reach the CPU and the driver can dequeue
from). This flooding domain changes when the interface joins a bridge
and is configured to include, beside the control interface, all other
DPAA2 switch interfaces.
We impose a minimum limit of FDB tables available equal to the number of
switch interfaces so that we guarantee that, in the maximal
configuration - all interfaces are standalone, each switch port will
have a private FDB table. At the same time, we only probe DPSW objects
that have the flooding and broadcast replicators configured to be per
FDB (DPSW_*_PER_FDB). Without this, the dpaa2-switch driver would not
be able to configure multiple switching domains.
At probe time, a FDB table will be allocated for each port. At a bridge
join event, the switch port will either continue to use the current FDB
table (if it's the first dpaa2-switch port to join that bridge) or will
switch to use the FDB table associated with the port that it's already
under the bridge. If a FDB switch is necessary, the private FDB table
which was previously used will be returned to the pool of unused FDBs.
Upon a bridge leave, the switch port needs a private FDB table thus it
will search and get the first unused FDB table. This way, all the other
ports remaining under the bridge will continue to use the same FDB
table.
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-03-10 20:14:47 +08:00
|
|
|
ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!ethsw->fdbs) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_free_ports;
|
|
|
|
}
|
|
|
|
|
2018-03-14 23:55:54 +08:00
|
|
|
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
|
2020-10-09 23:30:00 +08:00
|
|
|
err = dpaa2_switch_probe_port(ethsw, i);
|
2018-03-14 23:55:54 +08:00
|
|
|
if (err)
|
staging: dpaa2-switch: properly setup switching domains
Until now, the DPAA2 switch was not capable to properly setup its
switching domains depending on the existence, or lack thereof, of a
upper bridge device. This meant that all switch ports of a DPSW object
were switching by default even though they were not under the same
bridge device.
Another issue was the inability to actually add the CPU in the flooding
domains (broadcast, unknown unicast etc) of a particular switch port.
This meant that a simple ping on a switch interface was not possible
since no broadcast ARP frame would actually reach the CPU queues.
This patch tries to fix exactly these problems by:
* Creating and managing a FDB table for each flooding domain. This means
that when a switch interface is not bridged it will use its own FDB
table. While in bridged mode all DPAA2 switch interfaces under the
same upper will use the same FDB table, thus leverage the same FDB
entries.
* Adding a new MC firmware command - dpsw_set_egress_flood() - through
which the driver can setup the flooding domains as needed. For
example, when the switch interface is standalone, thus not in a
bridge with any other DPAA2 switch port, it will setup its broadcast
and unknown unicast flooding domains to only include the control
interface (the queues that reach the CPU and the driver can dequeue
from). This flooding domain changes when the interface joins a bridge
and is configured to include, beside the control interface, all other
DPAA2 switch interfaces.
We impose a minimum limit of FDB tables available equal to the number of
switch interfaces so that we guarantee that, in the maximal
configuration - all interfaces are standalone, each switch port will
have a private FDB table. At the same time, we only probe DPSW objects
that have the flooding and broadcast replicators configured to be per
FDB (DPSW_*_PER_FDB). Without this, the dpaa2-switch driver would not
be able to configure multiple switching domains.
At probe time, a FDB table will be allocated for each port. At a bridge
join event, the switch port will either continue to use the current FDB
table (if it's the first dpaa2-switch port to join that bridge) or will
switch to use the FDB table associated with the port that it's already
under the bridge. If a FDB switch is necessary, the private FDB table
which was previously used will be returned to the pool of unused FDBs.
Upon a bridge leave, the switch port needs a private FDB table thus it
will search and get the first unused FDB table. This way, all the other
ports remaining under the bridge will continue to use the same FDB
table.
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-03-10 20:14:47 +08:00
|
|
|
goto err_free_netdev;
|
2018-03-14 23:55:54 +08:00
|
|
|
}
|
|
|
|
|
2021-03-10 20:14:44 +08:00
|
|
|
/* Add a NAPI instance for each of the Rx queues. The first port's
|
|
|
|
* net_device will be associated with the instances since we do not have
|
|
|
|
* different queues for each switch ports.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
|
|
|
|
netif_napi_add(ethsw->ports[0]->netdev,
|
|
|
|
ðsw->fq[i].napi, dpaa2_switch_poll,
|
|
|
|
NAPI_POLL_WEIGHT);
|
|
|
|
|
2019-08-13 20:42:59 +08:00
|
|
|
err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
|
|
|
|
if (err) {
|
|
|
|
dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
|
staging: dpaa2-switch: properly setup switching domains
Until now, the DPAA2 switch was not capable to properly setup its
switching domains depending on the existence, or lack thereof, of a
upper bridge device. This meant that all switch ports of a DPSW object
were switching by default even though they were not under the same
bridge device.
Another issue was the inability to actually add the CPU in the flooding
domains (broadcast, unknown unicast etc) of a particular switch port.
This meant that a simple ping on a switch interface was not possible
since no broadcast ARP frame would actually reach the CPU queues.
This patch tries to fix exactly these problems by:
* Creating and managing a FDB table for each flooding domain. This means
that when a switch interface is not bridged it will use its own FDB
table. While in bridged mode all DPAA2 switch interfaces under the
same upper will use the same FDB table, thus leverage the same FDB
entries.
* Adding a new MC firmware command - dpsw_set_egress_flood() - through
which the driver can setup the flooding domains as needed. For
example, when the switch interface is standalone, thus not in a
bridge with any other DPAA2 switch port, it will setup its broadcast
and unknown unicast flooding domains to only include the control
interface (the queues that reach the CPU and the driver can dequeue
from). This flooding domain changes when the interface joins a bridge
and is configured to include, beside the control interface, all other
DPAA2 switch interfaces.
We impose a minimum limit of FDB tables available equal to the number of
switch interfaces so that we guarantee that, in the maximal
configuration - all interfaces are standalone, each switch port will
have a private FDB table. At the same time, we only probe DPSW objects
that have the flooding and broadcast replicators configured to be per
FDB (DPSW_*_PER_FDB). Without this, the dpaa2-switch driver would not
be able to configure multiple switching domains.
At probe time, a FDB table will be allocated for each port. At a bridge
join event, the switch port will either continue to use the current FDB
table (if it's the first dpaa2-switch port to join that bridge) or will
switch to use the FDB table associated with the port that it's already
under the bridge. If a FDB switch is necessary, the private FDB table
which was previously used will be returned to the pool of unused FDBs.
Upon a bridge leave, the switch port needs a private FDB table thus it
will search and get the first unused FDB table. This way, all the other
ports remaining under the bridge will continue to use the same FDB
table.
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-03-10 20:14:47 +08:00
|
|
|
goto err_free_netdev;
|
2019-08-13 20:42:59 +08:00
|
|
|
}
|
2018-03-14 23:55:54 +08:00
|
|
|
|
|
|
|
/* Setup IRQs */
|
2020-10-09 23:30:00 +08:00
|
|
|
err = dpaa2_switch_setup_irqs(sw_dev);
|
2018-03-14 23:55:54 +08:00
|
|
|
if (err)
|
|
|
|
goto err_stop;
|
|
|
|
|
2021-03-10 20:14:44 +08:00
|
|
|
/* Register the netdev only when the entire setup is done and the
|
|
|
|
* switch port interfaces are ready to receive traffic
|
|
|
|
*/
|
|
|
|
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
|
|
|
|
err = register_netdev(ethsw->ports[i]->netdev);
|
|
|
|
if (err < 0) {
|
|
|
|
dev_err(dev, "register_netdev error %d\n", err);
|
|
|
|
goto err_unregister_ports;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-14 23:55:54 +08:00
|
|
|
return 0;
|
|
|
|
|
2021-03-10 20:14:44 +08:00
|
|
|
err_unregister_ports:
|
|
|
|
for (i--; i >= 0; i--)
|
|
|
|
unregister_netdev(ethsw->ports[i]->netdev);
|
staging: dpaa2-switch: properly setup switching domains
Until now, the DPAA2 switch was not capable to properly setup its
switching domains depending on the existence, or lack thereof, of a
upper bridge device. This meant that all switch ports of a DPSW object
were switching by default even though they were not under the same
bridge device.
Another issue was the inability to actually add the CPU in the flooding
domains (broadcast, unknown unicast etc) of a particular switch port.
This meant that a simple ping on a switch interface was not possible
since no broadcast ARP frame would actually reach the CPU queues.
This patch tries to fix exactly these problems by:
* Creating and managing a FDB table for each flooding domain. This means
that when a switch interface is not bridged it will use its own FDB
table. While in bridged mode all DPAA2 switch interfaces under the
same upper will use the same FDB table, thus leverage the same FDB
entries.
* Adding a new MC firmware command - dpsw_set_egress_flood() - through
which the driver can setup the flooding domains as needed. For
example, when the switch interface is standalone, thus not in a
bridge with any other DPAA2 switch port, it will setup its broadcast
and unknown unicast flooding domains to only include the control
interface (the queues that reach the CPU and the driver can dequeue
from). This flooding domain changes when the interface joins a bridge
and is configured to include, beside the control interface, all other
DPAA2 switch interfaces.
We impose a minimum limit of FDB tables available equal to the number of
switch interfaces so that we guarantee that, in the maximal
configuration - all interfaces are standalone, each switch port will
have a private FDB table. At the same time, we only probe DPSW objects
that have the flooding and broadcast replicators configured to be per
FDB (DPSW_*_PER_FDB). Without this, the dpaa2-switch driver would not
be able to configure multiple switching domains.
At probe time, a FDB table will be allocated for each port. At a bridge
join event, the switch port will either continue to use the current FDB
table (if it's the first dpaa2-switch port to join that bridge) or will
switch to use the FDB table associated with the port that it's already
under the bridge. If a FDB switch is necessary, the private FDB table
which was previously used will be returned to the pool of unused FDBs.
Upon a bridge leave, the switch port needs a private FDB table thus it
will search and get the first unused FDB table. This way, all the other
ports remaining under the bridge will continue to use the same FDB
table.
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-03-10 20:14:47 +08:00
|
|
|
dpaa2_switch_teardown_irqs(sw_dev);
|
2018-03-14 23:55:54 +08:00
|
|
|
err_stop:
|
2019-08-13 20:42:59 +08:00
|
|
|
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
|
staging: dpaa2-switch: properly setup switching domains
Until now, the DPAA2 switch was not capable to properly setup its
switching domains depending on the existence, or lack thereof, of a
upper bridge device. This meant that all switch ports of a DPSW object
were switching by default even though they were not under the same
bridge device.
Another issue was the inability to actually add the CPU in the flooding
domains (broadcast, unknown unicast etc) of a particular switch port.
This meant that a simple ping on a switch interface was not possible
since no broadcast ARP frame would actually reach the CPU queues.
This patch tries to fix exactly these problems by:
* Creating and managing a FDB table for each flooding domain. This means
that when a switch interface is not bridged it will use its own FDB
table. While in bridged mode all DPAA2 switch interfaces under the
same upper will use the same FDB table, thus leverage the same FDB
entries.
* Adding a new MC firmware command - dpsw_set_egress_flood() - through
which the driver can setup the flooding domains as needed. For
example, when the switch interface is standalone, thus not in a
bridge with any other DPAA2 switch port, it will setup its broadcast
and unknown unicast flooding domains to only include the control
interface (the queues that reach the CPU and the driver can dequeue
from). This flooding domain changes when the interface joins a bridge
and is configured to include, beside the control interface, all other
DPAA2 switch interfaces.
We impose a minimum limit of FDB tables available equal to the number of
switch interfaces so that we guarantee that, in the maximal
configuration - all interfaces are standalone, each switch port will
have a private FDB table. At the same time, we only probe DPSW objects
that have the flooding and broadcast replicators configured to be per
FDB (DPSW_*_PER_FDB). Without this, the dpaa2-switch driver would not
be able to configure multiple switching domains.
At probe time, a FDB table will be allocated for each port. At a bridge
join event, the switch port will either continue to use the current FDB
table (if it's the first dpaa2-switch port to join that bridge) or will
switch to use the FDB table associated with the port that it's already
under the bridge. If a FDB switch is necessary, the private FDB table
which was previously used will be returned to the pool of unused FDBs.
Upon a bridge leave, the switch port needs a private FDB table thus it
will search and get the first unused FDB table. This way, all the other
ports remaining under the bridge will continue to use the same FDB
table.
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-03-10 20:14:47 +08:00
|
|
|
err_free_netdev:
|
2021-03-10 20:14:44 +08:00
|
|
|
for (i--; i >= 0; i--)
|
2018-03-14 23:55:54 +08:00
|
|
|
free_netdev(ethsw->ports[i]->netdev);
|
	kfree(ethsw->fdbs);
err_free_ports:
	kfree(ethsw->ports);

err_takedown:
	dpaa2_switch_takedown(sw_dev);

err_free_cmdport:
	fsl_mc_portal_free(ethsw->mc_io);

err_free_drvdata:
	kfree(ethsw);
	dev_set_drvdata(dev, NULL);

	return err;
}
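
/*
 * fsl-mc bus match table: the driver binds to any DPSW object exposed
 * by the MC firmware (subject to the minimum version check done at
 * probe time).
 */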
static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpsw",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
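
/*
 * fsl-mc driver structure tying the probe/remove callbacks to the
 * match table above.
 */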
static struct fsl_mc_driver dpaa2_switch_drv = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_switch_probe,
	.remove = dpaa2_switch_remove,
	.match_id_table = dpaa2_switch_match_id_table
};
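
/*
 * Notifier blocks: net_device events (e.g. ports joining or leaving a
 * bridge) are handled by dpaa2_switch_port_netdevice_event, while
 * switchdev events are split between the atomic and the blocking
 * notifier chains.
 */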
static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
	.notifier_call = dpaa2_switch_port_netdevice_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_nb = {
	.notifier_call = dpaa2_switch_port_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
	.notifier_call = dpaa2_switch_port_blocking_event,
};
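
/*
 * Register all three notifiers; on failure, unwind the ones already
 * registered so the module load fails cleanly.
 */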
static int dpaa2_switch_register_notifiers(void)
{
	int err;

	err = register_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
		return err;
	}

	err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
		goto err_switchdev_nb;
	}

	err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&dpaa2_switch_port_nb);

	return err;
}
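
/*
 * Unregister the notifiers in the reverse order of registration;
 * errors are only logged since there is no meaningful recovery at this
 * point.
 */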
static void dpaa2_switch_unregister_notifiers(void)
{
	int err;

	err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
		       err);

	err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
}
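
/*
 * Module init: register the driver on the fsl-mc bus first, then the
 * notifiers; if notifier registration fails, the bus registration is
 * rolled back.
 */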
static int __init dpaa2_switch_driver_init(void)
{
	int err;

	err = fsl_mc_driver_register(&dpaa2_switch_drv);
	if (err)
		return err;

	err = dpaa2_switch_register_notifiers();
	if (err) {
		fsl_mc_driver_unregister(&dpaa2_switch_drv);
		return err;
	}

	return 0;
}

static void __exit dpaa2_switch_driver_exit(void)
{
	dpaa2_switch_unregister_notifiers();
	fsl_mc_driver_unregister(&dpaa2_switch_drv);
}

module_init(dpaa2_switch_driver_init);
module_exit(dpaa2_switch_driver_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");