mlx5-updates-2021-04-21
Merge tag 'mlx5-updates-2021-04-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-04-21

devlink external port attribute for SF (Sub-Function) port flavour

This adds support for instantiating Sub-Functions on external hosts, e.g.
when the Eswitch manager is enabled on the ARM SmartNIC SoC CPU, users are
now able to spawn new Sub-Functions on the host server CPU.

Parav Pandit says:
==================
This series introduces and uses an external attribute for the SF port to
indicate that an SF port belongs to an external controller.

This is needed to generate a unique phys_port_name when PF and SF numbers
overlap between local and external controllers. For example, with two
controllers 0 and 1, both controllers can have an SF with PF number 0 and
SF number 77. Without the controller number, phys_port_name would have
duplicate entries. Hence, add the controller number optionally when an SF
port is for an external controller. This extension is similar to the
existing PF and VF eswitch ports of an external controller.

Example view and config sequence of an external SF port:

On the eswitch system:
$ devlink dev eswitch set pci/0033:01:00.0 mode switchdev

$ devlink port show
pci/0033:01:00.0/196607: type eth netdev enP51p1s0f0np0 flavour physical port 0 splittable false
pci/0033:01:00.0/131072: type eth netdev eth0 flavour pcipf controller 1 pfnum 0 external true splittable false
  function:
    hw_addr 00:00:00:00:00:00

$ devlink port add pci/0033:01:00.0 flavour pcisf pfnum 0 sfnum 77 controller 1
pci/0033:01:00.0/163840: type eth netdev eth1 flavour pcisf controller 1 pfnum 0 sfnum 77 splittable false
  function:
    hw_addr 00:00:00:00:00:00 state inactive opstate detached

phys_port_name construction:
$ cat /sys/class/net/eth1/phys_port_name
c1pf0sf77

Patch summary:
The first 3 patches prepare the eswitch to handle vports in a more generic
way, using an xarray to look up a vport from its unique vport number.
Patch-1 returns maximum eswitch ports only when eswitch is enabled
Patch-2 prepares eswitch to return eswitch max ports from a struct
Patch-3 uses xarray for vport and representor lookup
Patch-4 considers SF for an additional range of SF vports
Patch-5 relies on SF hw table to check SF support
Patch-6 extends SF devlink port attribute for external flag
Patch-7 stores the per controller SF allocation attributes
Patch-8 uses SF function id for filtering events
Patch-9 uses helper for allocation and free
Patch-10 splits hw table into per controller table and generic one
Patch-11 extends sf table for additional range
==================
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
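For readers following the example: a minimal C sketch of the phys_port_name
scheme described above (the helper name and the rule that the local
controller 0 carries no "c<N>" prefix are assumptions for illustration, not
the driver's implementation):

#include <stddef.h>
#include <stdio.h>

/* Illustrative only: build an SF phys_port_name such as "c1pf0sf77".
 * Assumption: the controller prefix appears only for external
 * controllers (controller != 0), matching the series description.
 */
static void sf_phys_port_name(char *buf, size_t len, unsigned int controller,
			      unsigned int pfnum, unsigned int sfnum)
{
	if (controller)
		snprintf(buf, len, "c%upf%usf%u", controller, pfnum, sfnum);
	else
		snprintf(buf, len, "pf%usf%u", pfnum, sfnum);
}

int main(void)
{
	char name[32];

	sf_phys_port_name(name, sizeof(name), 1, 0, 77);
	printf("%s\n", name);	/* prints: c1pf0sf77 */
	return 0;
}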
This commit is contained in:
commit 1e5e4acb66
@@ -96,7 +96,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
 	}

 	if (!vport->egress.acl) {
-		vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+		vport->egress.acl = esw_acl_table_create(esw, vport,
							 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
							 table_size);
 		if (IS_ERR(vport->egress.acl)) {
@@ -148,7 +148,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
 	esw_acl_egress_vlan_grp_destroy(vport);
 }

-static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
+static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num)
 {
 	return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
 }
@@ -171,7 +171,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
 		table_size++;
 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
 		table_size++;
-	vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+	vport->egress.acl = esw_acl_table_create(esw, vport,
						 MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
 	if (IS_ERR(vport->egress.acl)) {
 		err = PTR_ERR(vport->egress.acl);
@@ -6,14 +6,14 @@
 #include "helper.h"

 struct mlx5_flow_table *
-esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
+esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size)
 {
 	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_core_dev *dev = esw->dev;
 	struct mlx5_flow_namespace *root_ns;
 	struct mlx5_flow_table *acl;
 	int acl_supported;
-	int vport_index;
+	u16 vport_num;
 	int err;

 	acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ?
@@ -23,11 +23,11 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
 	if (!acl_supported)
 		return ERR_PTR(-EOPNOTSUPP);

+	vport_num = vport->vport;
 	esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
 		  ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");

-	vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num);
-	root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
			 vport_num);
@@ -8,7 +8,7 @@

 /* General acl helper functions */
 struct mlx5_flow_table *
-esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size);
+esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size);

 /* Egress acl helper functions */
 void esw_acl_egress_table_destroy(struct mlx5_vport *vport);
@@ -177,7 +177,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 	}

 	if (!vport->ingress.acl) {
-		vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+		vport->ingress.acl = esw_acl_table_create(esw, vport,
							  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
							  table_size);
 		if (IS_ERR(vport->ingress.acl)) {
@@ -7,7 +7,7 @@
 #include "ofld.h"

 static bool
-esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
				 const struct mlx5_vport *vport)
 {
 	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
@@ -255,7 +255,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
 	if (esw_acl_ingress_prio_tag_enabled(esw, vport))
 		num_ftes++;

-	vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+	vport->ingress.acl = esw_acl_table_create(esw, vport,
						  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						  num_ftes);
 	if (IS_ERR(vport->ingress.acl)) {
@@ -14,8 +14,7 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i
 	memcpy(ppid->id, &parent_id, sizeof(parent_id));
 }

-static bool
-mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
 {
 	return vport_num == MLX5_VPORT_UPLINK ||
	       (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
@@ -124,7 +123,7 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
 }

 int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-				      u16 vport_num, u32 sfnum)
+				      u16 vport_num, u32 controller, u32 sfnum)
 {
 	struct mlx5_core_dev *dev = esw->dev;
 	struct netdev_phys_item_id ppid = {};
@@ -142,7 +141,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
 	mlx5_esw_get_port_parent_id(dev, &ppid);
 	memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
 	dl_port->attrs.switch_id.id_len = ppid.id_len;
-	devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum);
+	devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
 	devlink = priv_to_devlink(dev);
 	dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
 	err = devlink_port_register(devlink, dl_port, dl_port_index);
@@ -216,7 +216,8 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
 int esw_legacy_enable(struct mlx5_eswitch *esw)
 {
 	struct mlx5_vport *vport;
-	int ret, i;
+	unsigned long i;
+	int ret;

 	ret = esw_create_legacy_table(esw);
 	if (ret)
@@ -88,20 +88,17 @@ struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
 {
-	u16 idx;
+	struct mlx5_vport *vport;

 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return ERR_PTR(-EPERM);

-	idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
-
-	if (idx > esw->total_vports - 1) {
-		esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
-			  vport_num, idx);
+	vport = xa_load(&esw->vports, vport_num);
+	if (!vport) {
+		esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
 		return ERR_PTR(-EINVAL);
 	}

-	return &esw->vports[idx];
+	return vport;
 }

 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
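A side note on the xarray pattern this hunk introduces: vports are now looked
up directly by vport number with xa_load() instead of first being translated
to a dense array index. A self-contained, module-style sketch of that
insert/lookup/teardown lifecycle (the demo_vport struct and the values are
invented for illustration; the <linux/xarray.h> calls are the real API):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_vport {
	u16 vport;	/* sparse key, e.g. an uplink-like 0xfffe */
};

static DEFINE_XARRAY(demo_vports);

static int __init demo_init(void)
{
	struct demo_vport *vport;
	unsigned long i;
	int err;

	/* Entries are keyed directly by the (sparse) vport number. */
	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return -ENOMEM;
	vport->vport = 0xfffe;
	err = xa_insert(&demo_vports, vport->vport, vport, GFP_KERNEL);
	if (err) {
		kfree(vport);
		return err;
	}

	/* Lookup is a single xa_load(); no num-to-index translation. */
	vport = xa_load(&demo_vports, 0xfffe);
	pr_info("found vport 0x%x\n", vport ? vport->vport : 0);

	xa_for_each(&demo_vports, i, vport)
		pr_info("entry at index %lu\n", i);
	return 0;
}

static void __exit demo_exit(void)
{
	struct demo_vport *vport;
	unsigned long i;

	xa_for_each(&demo_vports, i, vport) {
		xa_erase(&demo_vports, i);
		kfree(vport);
	}
	xa_destroy(&demo_vports);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");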
@@ -345,9 +342,10 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
 {
 	u8 *mac = vaddr->node.addr;
 	struct mlx5_vport *vport;
-	u16 i, vport_num;
+	unsigned long i;
+	u16 vport_num;

-	mlx5_esw_for_all_vports(esw, i, vport) {
+	mlx5_esw_for_each_vport(esw, i, vport) {
 		struct hlist_head *vport_hash = vport->mc_list;
 		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
@@ -1175,7 +1173,7 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
 static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
 {
 	struct mlx5_vport *vport;
-	int i;
+	unsigned long i;

 	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
 		memset(&vport->qos, 0, sizeof(vport->qos));
@@ -1213,20 +1211,25 @@ void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)

 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
 {
-	int i;
+	struct mlx5_vport *vport;
+	unsigned long i;

-	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
-		mlx5_eswitch_unload_vport(esw, i);
+	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+		if (!vport->enabled)
+			continue;
+		mlx5_eswitch_unload_vport(esw, vport->vport);
+	}
 }

 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events)
 {
+	struct mlx5_vport *vport;
+	unsigned long i;
 	int err;
-	int i;

-	mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
-		err = mlx5_eswitch_load_vport(esw, i, enabled_events);
+	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
 		if (err)
 			goto vf_err;
 	}
@@ -1234,7 +1237,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
 	return 0;

 vf_err:
-	mlx5_eswitch_unload_vf_vports(esw, i - 1);
+	mlx5_eswitch_unload_vf_vports(esw, num_vfs);
 	return err;
 }
@@ -1563,24 +1566,161 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
 	up_write(&esw->mode_lock);
 }

+static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
+{
+	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
+	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+	MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
+	MLX5_SET(query_hca_cap_in, in, other_function, true);
+	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
+}
+
+int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
+{
+	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+	void *query_ctx;
+	void *hca_caps;
+	int err;
+
+	if (!mlx5_core_is_ecpf(dev)) {
+		*max_sfs = 0;
+		return 0;
+	}
+
+	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+	if (!query_ctx)
+		return -ENOMEM;
+
+	err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
+	if (err)
+		goto out_free;
+
+	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+	*max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
+	*sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);
+
+out_free:
+	kfree(query_ctx);
+	return err;
+}
+
+static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
+				int index, u16 vport_num)
+{
+	struct mlx5_vport *vport;
+	int err;
+
+	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
+	if (!vport)
+		return -ENOMEM;
+
+	vport->dev = esw->dev;
+	vport->vport = vport_num;
+	vport->index = index;
+	vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+	INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
+	err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
+	if (err)
+		goto insert_err;
+
+	esw->total_vports++;
+	return 0;
+
+insert_err:
+	kfree(vport);
+	return err;
+}
+
+static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+	xa_erase(&esw->vports, vport->vport);
+	kfree(vport);
+}
+
+static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
+{
+	struct mlx5_vport *vport;
+	unsigned long i;
+
+	mlx5_esw_for_each_vport(esw, i, vport)
+		mlx5_esw_vport_free(esw, vport);
+	xa_destroy(&esw->vports);
+}
+
+static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	u16 max_host_pf_sfs;
+	u16 base_sf_num;
+	int idx = 0;
+	int err;
+	int i;
+
+	xa_init(&esw->vports);
+
+	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
+	if (err)
+		goto err;
+	if (esw->first_host_vport == MLX5_VPORT_PF)
+		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+	idx++;
+
+	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+		err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
+		if (err)
+			goto err;
+		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+		idx++;
+	}
+	base_sf_num = mlx5_sf_start_function_id(dev);
+	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
+		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
+		if (err)
+			goto err;
+		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
+		idx++;
+	}
+
+	err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
+	if (err)
+		goto err;
+	for (i = 0; i < max_host_pf_sfs; i++) {
+		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
+		if (err)
+			goto err;
+		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
+		idx++;
+	}
+
+	if (mlx5_ecpf_vport_exists(dev)) {
+		err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
+		if (err)
+			goto err;
+		idx++;
+	}
+	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
+	if (err)
+		goto err;
+	return 0;
+
+err:
+	mlx5_esw_vports_cleanup(esw);
+	return err;
+}
+
 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eswitch *esw;
-	struct mlx5_vport *vport;
-	int total_vports;
-	int err, i;
+	int err;

 	if (!MLX5_VPORT_MANAGER(dev))
 		return 0;

-	total_vports = mlx5_eswitch_get_total_vports(dev);
-
-	esw_info(dev,
-		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
-		 total_vports,
-		 MLX5_MAX_UC_PER_VPORT(dev),
-		 MLX5_MAX_MC_PER_VPORT(dev));
-
 	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
 	if (!esw)
 		return -ENOMEM;
@@ -1595,18 +1735,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto abort;
 	}

-	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
-			      GFP_KERNEL);
-	if (!esw->vports) {
-		err = -ENOMEM;
+	err = mlx5_esw_vports_init(esw);
+	if (err)
 		goto abort;
-	}
-
-	esw->total_vports = total_vports;

 	err = esw_offloads_init_reps(esw);
 	if (err)
-		goto abort;
+		goto reps_err;

 	mutex_init(&esw->offloads.encap_tbl_lock);
 	hash_init(esw->offloads.encap_tbl);
@@ -1619,25 +1754,25 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	mutex_init(&esw->state_lock);
 	init_rwsem(&esw->mode_lock);

-	mlx5_esw_for_all_vports(esw, i, vport) {
-		vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
-		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
-		vport->dev = dev;
-		INIT_WORK(&vport->vport_change_handler,
-			  esw_vport_change_handler);
-	}
-
 	esw->enabled_vports = 0;
 	esw->mode = MLX5_ESWITCH_NONE;
 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;

 	dev->priv.eswitch = esw;
 	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
+
+	esw_info(dev,
+		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
+		 esw->total_vports,
+		 MLX5_MAX_UC_PER_VPORT(dev),
+		 MLX5_MAX_MC_PER_VPORT(dev));
 	return 0;

+reps_err:
+	mlx5_esw_vports_cleanup(esw);
 abort:
 	if (esw->work_queue)
 		destroy_workqueue(esw->work_queue);
-	kfree(esw->vports);
 	kfree(esw);
 	return err;
 }
@@ -1659,7 +1794,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	mutex_destroy(&esw->offloads.encap_tbl_lock);
 	mutex_destroy(&esw->offloads.decap_tbl_lock);
 	esw_offloads_cleanup_reps(esw);
-	kfree(esw->vports);
+	mlx5_esw_vports_cleanup(esw);
 	kfree(esw);
 }
@@ -1718,8 +1853,29 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 	return err;
 }

+static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
+{
+	struct mlx5_vport *vport;
+
+	vport = mlx5_eswitch_get_vport(esw, vport_num);
+	if (IS_ERR(vport))
+		return false;
+
+	return xa_get_mark(&esw->vports, vport_num, mark);
+}
+
+bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
+}
+
+bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
+}
+
 static bool
-is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
 {
 	return vport_num == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
@@ -1891,9 +2047,9 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
 	struct mlx5_vport *evport;
 	u32 max_guarantee = 0;
-	int i;
+	unsigned long i;

-	mlx5_esw_for_all_vports(esw, i, evport) {
+	mlx5_esw_for_each_vport(esw, i, evport) {
 		if (!evport->enabled || evport->qos.min_rate < max_guarantee)
 			continue;
 		max_guarantee = evport->qos.min_rate;
@@ -1911,11 +2067,11 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
 	struct mlx5_vport *evport;
 	u32 vport_max_rate;
 	u32 vport_min_rate;
+	unsigned long i;
 	u32 bw_share;
 	int err;
-	int i;

-	mlx5_esw_for_all_vports(esw, i, evport) {
+	mlx5_esw_for_each_vport(esw, i, evport) {
 		if (!evport->enabled)
 			continue;
 		vport_min_rate = evport->qos.min_rate;
@@ -2205,3 +2361,19 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw)
 {
 	up_write(&esw->mode_lock);
 }
+
+/**
+ * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
+ *
+ * @dev: Pointer to core device
+ *
+ * mlx5_eswitch_get_total_vports returns total number of eswitch vports.
+ */
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+	struct mlx5_eswitch *esw;
+
+	esw = dev->priv.eswitch;
+	return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
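A note on the marks used above: mlx5_esw_vports_init() tags each inserted
vport with an XA_MARK_* so later code can filter by port type. A minimal
sketch of that classification idiom (DEMO_MARK_VF is a stand-in name; an
xarray entry carries up to three independent marks, XA_MARK_0..XA_MARK_2):

#include <linux/xarray.h>

#define DEMO_MARK_VF	XA_MARK_1

static void demo_tag_vf(struct xarray *xa, unsigned long index)
{
	/* The entry at @index must already be present in the xarray. */
	xa_set_mark(xa, index, DEMO_MARK_VF);
}

static unsigned int demo_count_vfs(struct xarray *xa)
{
	unsigned long index;
	unsigned int n = 0;
	void *entry;

	/* Visits only entries that carry DEMO_MARK_VF. */
	xa_for_each_marked(xa, index, entry, DEMO_MARK_VF)
		n++;
	return n;
}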
@@ -176,6 +176,7 @@ struct mlx5_vport {
 	u16 vport;
 	bool enabled;
 	enum mlx5_eswitch_vport_event enabled_events;
+	int index;
 	struct devlink_port *dl_port;
 };
@@ -228,7 +229,7 @@ struct mlx5_esw_offload {
 	struct mlx5_flow_table *ft_offloads;
 	struct mlx5_flow_group *vport_rx_group;
-	struct mlx5_eswitch_rep *vport_reps;
+	struct xarray vport_reps;
 	struct list_head peer_flows;
 	struct mutex peer_mutex;
 	struct mutex encap_tbl_lock; /* protects encap_tbl */
@@ -278,7 +279,7 @@ struct mlx5_eswitch {
 	struct esw_mc_addr mc_promisc;
 	/* end of legacy */
 	struct workqueue_struct *work_queue;
-	struct mlx5_vport *vports;
+	struct xarray vports;
 	u32 flags;
 	int total_vports;
 	int enabled_vports;
@@ -545,94 +546,11 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
 }

-static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
-{
-	/* PF and VF vports indices start from 0 to max_vfs */
-	return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
-}
-
-static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
-{
-	return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
-}
-
-static inline int
-mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
-{
-	return vport_num - mlx5_sf_start_function_id(esw->dev) +
-	       MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
-}
-
-static inline u16
-mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
-{
-	return mlx5_sf_start_function_id(esw->dev) + idx -
-	       (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
-}
-
-static inline bool
-mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
-{
-	return mlx5_sf_supported(esw->dev) &&
-	       vport_num >= mlx5_sf_start_function_id(esw->dev) &&
-	       (vport_num < (mlx5_sf_start_function_id(esw->dev) +
-			     mlx5_sf_max_functions(esw->dev)));
-}
-
 static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
 {
 	return mlx5_core_is_ecpf_esw_manager(dev);
 }

-static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
-{
-	/* Uplink always locate at the last element of the array.*/
-	return esw->total_vports - 1;
-}
-
-static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
-{
-	return esw->total_vports - 2;
-}
-
-static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
-						  u16 vport_num)
-{
-	if (vport_num == MLX5_VPORT_ECPF) {
-		if (!mlx5_ecpf_vport_exists(esw->dev))
-			esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
-		return mlx5_eswitch_ecpf_idx(esw);
-	}
-
-	if (vport_num == MLX5_VPORT_UPLINK)
-		return mlx5_eswitch_uplink_idx(esw);
-
-	if (mlx5_esw_is_sf_vport(esw, vport_num))
-		return mlx5_esw_sf_vport_num_to_index(esw, vport_num);
-
-	/* PF and VF vports start from 0 to max_vfs */
-	return vport_num;
-}
-
-static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
-						  int index)
-{
-	if (index == mlx5_eswitch_ecpf_idx(esw) &&
-	    mlx5_ecpf_vport_exists(esw->dev))
-		return MLX5_VPORT_ECPF;
-
-	if (index == mlx5_eswitch_uplink_idx(esw))
-		return MLX5_VPORT_UPLINK;
-
-	/* SF vports indices are after VFs and before ECPF */
-	if (mlx5_sf_supported(esw->dev) &&
-	    index > mlx5_core_max_vfs(esw->dev))
-		return mlx5_esw_sf_vport_index_to_num(esw, index);
-
-	/* PF and VF vports start from 0 to max_vfs */
-	return index;
-}
-
 static inline unsigned int
 mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
@@ -649,82 +567,42 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
 /* TODO: This mlx5e_tc function shouldn't be called by eswitch */
 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

-/* The vport getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
+/* Each mark identifies eswitch vport type.
+ * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
+ * a single mark.
+ * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
+ * MLX5_ESW_VPT_SF identifies SF vport.
  */
-#define mlx5_esw_for_all_vports(esw, i, vport)	\
-	for ((i) = MLX5_VPORT_PF;		\
-	     (vport) = &(esw)->vports[i],	\
-	     (i) < (esw)->total_vports; (i)++)
+#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
+#define MLX5_ESW_VPT_VF XA_MARK_1
+#define MLX5_ESW_VPT_SF XA_MARK_2

-#define mlx5_esw_for_all_vports_reverse(esw, i, vport)	\
-	for ((i) = (esw)->total_vports - 1;		\
-	     (vport) = &(esw)->vports[i],		\
-	     (i) >= MLX5_VPORT_PF; (i)--)
-
-#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)	\
-	for ((i) = MLX5_VPORT_FIRST_VF;			\
-	     (vport) = &(esw)->vports[(i)],		\
-	     (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs)	\
-	for ((i) = (nvfs);					\
-	     (vport) = &(esw)->vports[(i)],			\
-	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)
-
-/* The rep getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
+/* The vport iterator is valid only after vport are initialized in mlx5_eswitch_init.
+ * Borrowed the idea from xa_for_each_marked() but with support for desired last element.
  */
-#define mlx5_esw_for_all_reps(esw, i, rep)		\
-	for ((i) = MLX5_VPORT_PF;			\
-	     (rep) = &(esw)->offloads.vport_reps[i],	\
-	     (i) < (esw)->total_vports; (i)++)
-
-#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)	\
-	for ((i) = MLX5_VPORT_FIRST_VF;			\
-	     (rep) = &(esw)->offloads.vport_reps[i],	\
-	     (i) <= (nvfs); (i)++)
+#define mlx5_esw_for_each_vport(esw, index, vport) \
+	xa_for_each(&((esw)->vports), index, vport)

-#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)	\
-	for ((i) = (nvfs);					\
-	     (rep) = &(esw)->offloads.vport_reps[i],		\
-	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)
+#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
+	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

-#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs)	\
-	for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)
+#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
+	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

-#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)	\
-	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
+#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
+	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

-/* Includes host PF (vport 0) if it's not esw manager. */
-#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs)	\
-	for ((i) = (esw)->first_host_vport;			\
-	     (rep) = &(esw)->offloads.vport_reps[i],		\
-	     (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs)	\
-	for ((i) = (nvfs);						\
-	     (rep) = &(esw)->offloads.vport_reps[i],			\
-	     (i) >= (esw)->first_host_vport; (i)--)
-
-#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)	\
-	for ((vport) = (esw)->first_host_vport;			\
-	     (vport) <= (nvfs); (vport)++)
-
-#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs)	\
-	for ((vport) = (nvfs);						\
-	     (vport) >= (esw)->first_host_vport; (vport)--)
-
-#define mlx5_esw_for_each_sf_rep(esw, i, rep)		\
-	for ((i) = mlx5_esw_sf_start_idx(esw);		\
-	     (rep) = &(esw)->offloads.vport_reps[(i)],	\
-	     (i) < mlx5_esw_sf_end_idx(esw); (i++))
+#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
+	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)

 struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

-bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
@@ -784,12 +662,13 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
 struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

 int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-				      u16 vport_num, u32 sfnum);
+				      u16 vport_num, u32 controller, u32 sfnum);
 void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-				      u16 vport_num, u32 sfnum);
+				      u16 vport_num, u32 controller, u32 sfnum);
 void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
@@ -816,6 +695,8 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw);

 void esw_vport_change_handle_locked(struct mlx5_vport *vport);

+bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);
+
 #else /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
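A note on the bounded iterator defined above: mlx5_esw_for_each_entry_marked()
differs from plain xa_for_each_marked() in stopping at a caller-supplied last
index, which is what lets the VF walk honor num_vfs. An open-coded sketch of
the same loop, for illustration only:

#include <linux/xarray.h>

/* Walk entries carrying @filter up to and including index @last, the
 * same shape as the driver's macro but written out as a function.
 */
static void demo_walk_bounded(struct xarray *xa, unsigned long last,
			      xa_mark_t filter)
{
	unsigned long index = 0;
	void *entry;

	for (entry = xa_find(xa, &index, last, filter);
	     entry;
	     entry = xa_find_after(xa, &index, last, filter)) {
		/* use entry; index holds its position in the xarray */
	}
}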
@@ -49,6 +49,16 @@
 #include "en_tc.h"
 #include "en/mapping.h"

+#define mlx5_esw_for_each_rep(esw, i, rep) \
+	xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+
+#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
+	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)
+
+#define mlx5_esw_for_each_vf_rep(esw, index, rep)	\
+	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
+				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)
+
 /* There are two match-all miss flows, one for unicast dst mac and
  * one for multicast.
  */
@@ -67,10 +77,7 @@ static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
 {
-	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
-
-	WARN_ON(idx > esw->total_vports - 1);
-	return &esw->offloads.vport_reps[idx];
+	return xa_load(&esw->offloads.vport_reps, vport_num);
 }

 static void
@@ -720,10 +727,11 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 {
 	struct mlx5_eswitch_rep *rep;
-	int i, err = 0;
+	unsigned long i;
+	int err = 0;

 	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
-	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
+	mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
 		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
 			continue;
@@ -972,13 +980,13 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
 static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
 {
 	struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
-	int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num;
+	int i = 0, num_vfs = esw->esw_funcs.num_vfs;

 	if (!num_vfs || !flows)
 		return;

-	mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs)
-		mlx5_del_flow_rules(flows[i++]);
+	for (i = 0; i < num_vfs; i++)
+		mlx5_del_flow_rules(flows[i]);

 	kvfree(flows);
 }
@@ -992,6 +1000,8 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
 	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_flow_handle **flows;
 	struct mlx5_flow_spec *spec;
+	struct mlx5_vport *vport;
+	unsigned long i;
 	u16 vport_num;

 	num_vfs = esw->esw_funcs.num_vfs;
@@ -1016,7 +1026,8 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

-	mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) {
+	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
+		vport_num = vport->vport;
 		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
 		dest.vport.num = vport_num;
@@ -1158,12 +1169,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_act flow_act = {0};
 	struct mlx5_flow_handle **flows;
-	struct mlx5_flow_handle *flow;
-	struct mlx5_flow_spec *spec;
 	/* total vports is the same for both e-switches */
 	int nvports = esw->total_vports;
+	struct mlx5_flow_handle *flow;
+	struct mlx5_flow_spec *spec;
+	struct mlx5_vport *vport;
+	unsigned long i;
 	void *misc;
-	int err, i;
+	int err;

 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec)
@@ -1182,6 +1195,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
			    misc_parameters);

 	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
 		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);
@@ -1191,10 +1205,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
-		flows[MLX5_VPORT_PF] = flow;
+		flows[vport->index] = flow;
 	}

 	if (mlx5_ecpf_vport_exists(esw->dev)) {
+		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
 		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
 		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
@@ -1202,13 +1217,13 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
-		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
+		flows[vport->index] = flow;
 	}

-	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
+	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
 		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
-						   spec, i);
+						   spec, vport->vport);

 		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
@@ -1216,7 +1231,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
-		flows[i] = flow;
+		flows[vport->index] = flow;
 	}

 	esw->fdb_table.offloads.peer_miss_rules = flows;
@@ -1225,15 +1240,20 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 	return 0;

 add_vf_flow_err:
-	nvports = --i;
-	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
-		mlx5_del_flow_rules(flows[i]);
-
-	if (mlx5_ecpf_vport_exists(esw->dev))
-		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
+	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
+		if (!flows[vport->index])
+			continue;
+		mlx5_del_flow_rules(flows[vport->index]);
+	}
+	if (mlx5_ecpf_vport_exists(esw->dev)) {
+		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+		mlx5_del_flow_rules(flows[vport->index]);
+	}
 add_ecpf_flow_err:
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
-		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
+	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+		mlx5_del_flow_rules(flows[vport->index]);
+	}
 add_pf_flow_err:
 	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
 	kvfree(flows);
@@ -1245,20 +1265,23 @@ alloc_flows_err:
 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
 {
 	struct mlx5_flow_handle **flows;
-	int i;
+	struct mlx5_vport *vport;
+	unsigned long i;

 	flows = esw->fdb_table.offloads.peer_miss_rules;

-	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
-					       mlx5_core_max_vfs(esw->dev))
-		mlx5_del_flow_rules(flows[i]);
+	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
+		mlx5_del_flow_rules(flows[vport->index]);

-	if (mlx5_ecpf_vport_exists(esw->dev))
-		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
-
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
-		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
+	if (mlx5_ecpf_vport_exists(esw->dev)) {
+		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+		mlx5_del_flow_rules(flows[vport->index]);
+	}
+
+	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+		mlx5_del_flow_rules(flows[vport->index]);
+	}
 	kvfree(flows);
 }
@@ -1402,11 +1425,11 @@ static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
 {
 	struct mlx5_vport_tbl_attr attr;
 	struct mlx5_vport *vport;
-	int i;
+	unsigned long i;

 	attr.chain = 0;
 	attr.prio = 1;
-	mlx5_esw_for_all_vports(esw, i, vport) {
+	mlx5_esw_for_each_vport(esw, i, vport) {
 		attr.vport = vport->vport;
 		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
 		mlx5_esw_vporttbl_put(esw, &attr);
@@ -1418,11 +1441,11 @@ static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
 	struct mlx5_vport_tbl_attr attr;
 	struct mlx5_flow_table *fdb;
 	struct mlx5_vport *vport;
-	int i;
+	unsigned long i;

 	attr.chain = 0;
 	attr.prio = 1;
-	mlx5_esw_for_all_vports(esw, i, vport) {
+	mlx5_esw_for_each_vport(esw, i, vport) {
 		attr.vport = vport->vport;
 		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
 		fdb = mlx5_esw_vporttbl_get(esw, &attr);
@@ -1910,12 +1933,12 @@ out:
 	return flow_rule;
 }

-static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
+static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
 {
 	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 	struct mlx5_core_dev *dev = esw->dev;
-	int vport;
+	struct mlx5_vport *vport;
+	unsigned long i;

 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return -EOPNOTSUPP;
@@ -1936,8 +1959,8 @@ static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)

 query_vports:
 	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
-	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
-		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
 		if (prev_mlx5_mode != mlx5_mode)
 			return -EINVAL;
 		prev_mlx5_mode = mlx5_mode;
@@ -2080,34 +2103,82 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
 	return err;
 }

+static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
+					   struct mlx5_eswitch_rep *rep,
+					   xa_mark_t mark)
+{
+	bool mark_set;
+
+	/* Copy the mark from vport to its rep */
+	mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
+	if (mark_set)
+		xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
+}
+
+static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
+{
+	struct mlx5_eswitch_rep *rep;
+	int rep_type;
+	int err;
+
+	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+	if (!rep)
+		return -ENOMEM;
+
+	rep->vport = vport->vport;
+	rep->vport_index = vport->index;
+	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
+
+	err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
+	if (err)
+		goto insert_err;
+
+	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
+	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
+	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
+	return 0;
+
+insert_err:
+	kfree(rep);
+	return err;
+}
+
+static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
+					  struct mlx5_eswitch_rep *rep)
+{
+	xa_erase(&esw->offloads.vport_reps, rep->vport);
+	kfree(rep);
+}
+
 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
 {
-	kfree(esw->offloads.vport_reps);
+	struct mlx5_eswitch_rep *rep;
+	unsigned long i;
+
+	mlx5_esw_for_each_rep(esw, i, rep)
+		mlx5_esw_offloads_rep_cleanup(esw, rep);
+	xa_destroy(&esw->offloads.vport_reps);
 }

 int esw_offloads_init_reps(struct mlx5_eswitch *esw)
 {
-	int total_vports = esw->total_vports;
-	struct mlx5_eswitch_rep *rep;
-	int vport_index;
-	u8 rep_type;
+	struct mlx5_vport *vport;
+	unsigned long i;
+	int err;

-	esw->offloads.vport_reps = kcalloc(total_vports,
-					   sizeof(struct mlx5_eswitch_rep),
-					   GFP_KERNEL);
-	if (!esw->offloads.vport_reps)
-		return -ENOMEM;
+	xa_init(&esw->offloads.vport_reps);

-	mlx5_esw_for_all_reps(esw, vport_index, rep) {
-		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
-		rep->vport_index = vport_index;
-
-		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
-			atomic_set(&rep->rep_data[rep_type].state,
-				   REP_UNREGISTERED);
+	mlx5_esw_for_each_vport(esw, i, vport) {
+		err = mlx5_esw_offloads_rep_init(esw, vport);
+		if (err)
+			goto err;
 	}

 	return 0;
+
+err:
+	esw_offloads_cleanup_reps(esw);
+	return err;
 }

 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
@@ -2121,7 +2192,7 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
 static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
 {
 	struct mlx5_eswitch_rep *rep;
-	int i;
+	unsigned long i;

 	mlx5_esw_for_each_sf_rep(esw, i, rep)
 		__esw_offloads_unload_rep(esw, rep, rep_type);
@@ -2130,11 +2201,11 @@ static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
 {
 	struct mlx5_eswitch_rep *rep;
-	int i;
+	unsigned long i;

 	__unload_reps_sf_vport(esw, rep_type);

-	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
+	mlx5_esw_for_each_vf_rep(esw, i, rep)
 		__esw_offloads_unload_rep(esw, rep, rep_type);

 	if (mlx5_ecpf_vport_exists(esw->dev)) {
@@ -2421,25 +2492,25 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
 {
 	struct mlx5_vport *vport;
-	int i;
+	unsigned long i;

 	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
 		return;

-	mlx5_esw_for_all_vports_reverse(esw, i, vport)
+	mlx5_esw_for_each_vport(esw, i, vport)
 		esw_offloads_vport_metadata_cleanup(esw, vport);
 }

 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
 {
 	struct mlx5_vport *vport;
+	unsigned long i;
 	int err;
-	int i;

 	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
 		return 0;

-	mlx5_esw_for_all_vports(esw, i, vport) {
+	mlx5_esw_for_each_vport(esw, i, vport) {
 		err = esw_offloads_vport_metadata_setup(esw, vport);
 		if (err)
 			goto metadata_err;
@@ -2676,11 +2747,25 @@ static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
 	return 0;
 }

+bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
+{
+	/* Local controller is always valid */
+	if (controller == 0)
+		return true;
+
+	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+		return false;
+
+	/* External host number starts with zero in device */
+	return (controller == esw->offloads.host_number + 1);
+}
+
 int esw_offloads_enable(struct mlx5_eswitch *esw)
 {
 	struct mapping_ctx *reg_c0_obj_pool;
 	struct mlx5_vport *vport;
-	int err, i;
+	unsigned long i;
+	int err;

 	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2926,13 +3011,44 @@ unlock:
 	return err;
 }

+static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
+				      struct netlink_ext_ack *extack)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_vport *vport;
+	u16 err_vport_num = 0;
+	unsigned long i;
+	int err = 0;
+
+	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+		err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
+		if (err) {
+			err_vport_num = vport->vport;
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Failed to set min inline on vport");
+			goto revert_inline_mode;
+		}
+	}
+	return 0;
+
+revert_inline_mode:
+	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+		if (vport->vport == err_vport_num)
+			break;
+		mlx5_modify_nic_vport_min_inline(dev,
+						 vport->vport,
+						 esw->offloads.inline_mode);
+	}
+	return err;
+}
+
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
-	int err, vport, num_vport;
 	struct mlx5_eswitch *esw;
 	u8 mlx5_mode;
+	int err;

 	esw = mlx5_devlink_eswitch_get(devlink);
 	if (IS_ERR(esw))
@@ -2967,25 +3083,14 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
 	if (err)
 		goto out;

-	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
-		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
-		if (err) {
-			NL_SET_ERR_MSG_MOD(extack,
-					   "Failed to set min inline on vport");
-			goto revert_inline_mode;
-		}
-	}
+	err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
+	if (err)
+		goto out;

 	esw->offloads.inline_mode = mlx5_mode;
 	up_write(&esw->mode_lock);
 	return 0;

-revert_inline_mode:
-	num_vport = --vport;
-	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
-		mlx5_modify_nic_vport_min_inline(dev,
-						 vport,
-						 esw->offloads.inline_mode);
 out:
 	up_write(&esw->mode_lock);
 	return err;
@@ -3116,11 +3221,11 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
 {
 	struct mlx5_eswitch_rep_data *rep_data;
 	struct mlx5_eswitch_rep *rep;
-	int i;
+	unsigned long i;

 	esw->offloads.rep_ops[rep_type] = ops;
-	mlx5_esw_for_all_reps(esw, i, rep) {
-		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
+	mlx5_esw_for_each_rep(esw, i, rep) {
+		if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
 			rep->esw = esw;
 			rep_data = &rep->rep_data[rep_type];
 			atomic_set(&rep_data->state, REP_REGISTERED);
@@ -3132,12 +3237,12 @@ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
 {
 	struct mlx5_eswitch_rep *rep;
-	int i;
+	unsigned long i;

 	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
 		__unload_reps_all_vport(esw, rep_type);

-	mlx5_esw_for_all_reps(esw, i, rep)
+	mlx5_esw_for_each_rep(esw, i, rep)
 		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
 }
 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@@ -3178,12 +3283,6 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
 }
 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

-bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
-{
-	return vport_num >= MLX5_VPORT_FIRST_VF &&
-	       vport_num <= esw->dev->priv.sriov.max_vfs;
-}
-
 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
 {
 	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
@@ -3209,7 +3308,7 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);

 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-				      u16 vport_num, u32 sfnum)
+				      u16 vport_num, u32 controller, u32 sfnum)
 {
 	int err;
@@ -3217,7 +3316,7 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
 	if (err)
 		return err;

-	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
+	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
 	if (err)
 		goto devlink_err;
@ -148,9 +148,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
|
||||
struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
|
||||
const struct mlx5_vhca_state_event *event = data;
|
||||
struct mlx5_sf_dev *sf_dev;
|
||||
u16 max_functions;
|
||||
u16 sf_index;
|
||||
u16 base_id;
|
||||
|
||||
sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
|
||||
max_functions = mlx5_sf_max_functions(table->dev);
|
||||
if (!max_functions)
|
||||
return 0;
|
||||
|
||||
base_id = MLX5_CAP_GEN(table->dev, sf_base_id);
|
||||
if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
|
||||
return 0;
|
||||
|
||||
sf_index = event->function_id - base_id;
|
||||
sf_dev = xa_load(&table->devices, sf_index);
|
||||
switch (event->new_vhca_state) {
|
||||
case MLX5_VHCA_STATE_ALLOCATED:
|
||||
|
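Note (annotation, not part of the patch): the handler above now drops VHCA events whose function id falls outside this device's own SF range instead of blindly computing an index from it. A standalone sketch of just that filter, with made-up numbers:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Only handle events whose function_id lies in [base_id, base_id + max). */
	static bool event_in_range(uint16_t function_id, uint16_t base_id,
				   uint16_t max_functions)
	{
		if (!max_functions) /* no SF support: ignore every event */
			return false;
		return function_id >= base_id && function_id < base_id + max_functions;
	}

	int main(void)
	{
		uint16_t base_id = 100, max_functions = 8;

		printf("fn 99  -> %d\n", event_in_range(99, base_id, max_functions));  /* 0 */
		printf("fn 100 -> %d\n", event_in_range(100, base_id, max_functions)); /* 1, sf_index 0 */
		printf("fn 107 -> %d\n", event_in_range(107, base_id, max_functions)); /* 1, sf_index 7 */
		printf("fn 108 -> %d\n", event_in_range(108, base_id, max_functions)); /* 0 */
		return 0;
	}

Events that pass the filter index the table with sf_index = function_id - base_id, exactly as in the hunk above.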
@@ -12,6 +12,7 @@
 struct mlx5_sf {
 	struct devlink_port dl_port;
 	unsigned int port_index;
+	u32 controller;
 	u16 id;
 	u16 hw_fn_id;
 	u16 hw_state;
@@ -58,7 +59,8 @@ static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
 }
 
 static struct mlx5_sf *
-mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack)
+mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
+	      u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
 {
 	unsigned int dl_port_index;
 	struct mlx5_sf *sf;
@@ -66,7 +68,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
 	int id_err;
 	int err;
 
-	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum);
+	if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
+		NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
+		return ERR_PTR(-EINVAL);
+	}
+
+	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
 	if (id_err < 0) {
 		err = id_err;
 		goto id_err;
@@ -78,11 +85,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
 		goto alloc_err;
 	}
 	sf->id = id_err;
-	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id);
+	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
 	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
 	sf->port_index = dl_port_index;
 	sf->hw_fn_id = hw_fn_id;
 	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
+	sf->controller = controller;
 
 	err = mlx5_sf_id_insert(table, sf);
 	if (err)
@@ -93,7 +101,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
 insert_err:
 	kfree(sf);
 alloc_err:
-	mlx5_sf_hw_table_sf_free(table->dev, id_err);
+	mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
 id_err:
 	if (err == -EEXIST)
 		NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
@@ -103,7 +111,7 @@ id_err:
 static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
 {
 	mlx5_sf_id_erase(table, sf);
-	mlx5_sf_hw_table_sf_free(table->dev, sf->id);
+	mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
 	kfree(sf);
 }
 
@@ -272,12 +280,12 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
 	struct mlx5_sf *sf;
 	int err;
 
-	sf = mlx5_sf_alloc(table, new_attr->sfnum, extack);
+	sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
 	if (IS_ERR(sf))
 		return PTR_ERR(sf);
 
 	err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
-						new_attr->sfnum);
+						new_attr->controller, new_attr->sfnum);
 	if (err)
 		goto esw_err;
 	*new_port_index = sf->port_index;
@@ -306,7 +314,8 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
 			   "User must provide unique sfnum. Driver does not support auto assignment");
 		return -EOPNOTSUPP;
 	}
-	if (new_attr->controller_valid && new_attr->controller) {
+	if (new_attr->controller_valid && new_attr->controller &&
+	    !mlx5_core_is_ecpf_esw_manager(dev)) {
 		NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
 		return -EOPNOTSUPP;
 	}
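Note (annotation, not part of the patch): the reworked check accepts a non-zero controller number only on devices that manage the eswitch from the embedded CPU. A small sketch of the predicate; is_ecpf_esw_manager is a stand-in for mlx5_core_is_ecpf_esw_manager():

	#include <stdbool.h>
	#include <stdio.h>

	static bool sf_controller_ok(bool controller_valid, unsigned int controller,
				     bool is_ecpf_esw_manager)
	{
		/* a non-zero controller means "external"; only the embedded-CPU
		 * eswitch manager may spawn SFs for it */
		if (controller_valid && controller && !is_ecpf_esw_manager)
			return false;
		return true;
	}

	int main(void)
	{
		printf("%d\n", sf_controller_ok(true, 1, false)); /* 0: rejected */
		printf("%d\n", sf_controller_ok(true, 1, true));  /* 1: SmartNIC side */
		printf("%d\n", sf_controller_ok(true, 0, false)); /* 1: local controller */
		return 0;
	}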
@@ -352,10 +361,10 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
 		 * firmware gives confirmation that it is detached by the driver.
 		 */
 		mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
-		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
 		kfree(sf);
 	} else {
-		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
 		kfree(sf);
 	}
 }
@@ -437,9 +446,6 @@ sf_err:
 
 static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
 {
-	if (!mlx5_sf_max_functions(table->dev))
-		return;
-
 	init_completion(&table->disable_complete);
 	refcount_set(&table->refcount, 1);
 }
@@ -462,9 +468,6 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
 
 static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
 {
-	if (!mlx5_sf_max_functions(table->dev))
-		return;
-
 	if (!refcount_read(&table->refcount))
 		return;
 
@@ -498,7 +501,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
 
 static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
 {
-	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev);
+	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
+	       mlx5_sf_hw_table_supported(dev);
 }
 
 int mlx5_sf_table_init(struct mlx5_core_dev *dev)

@@ -8,6 +8,7 @@
 #include "ecpf.h"
 #include "vhca_event.h"
 #include "mlx5_core.h"
+#include "eswitch.h"
 
 struct mlx5_sf_hw {
 	u32 usr_sfnum;
@@ -15,59 +16,113 @@ struct mlx5_sf_hw {
 	u8 pending_delete: 1;
 };
 
-struct mlx5_sf_hw_table {
-	struct mlx5_core_dev *dev;
+struct mlx5_sf_hwc_table {
 	struct mlx5_sf_hw *sfs;
-	int max_local_functions;
-	struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
-	struct notifier_block vhca_nb;
+	int max_fn;
+	u16 start_fn_id;
 };
 
-u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id)
+enum mlx5_sf_hwc_index {
+	MLX5_SF_HWC_LOCAL,
+	MLX5_SF_HWC_EXTERNAL,
+	MLX5_SF_HWC_MAX,
+};
+
+struct mlx5_sf_hw_table {
+	struct mlx5_core_dev *dev;
+	struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
+	struct notifier_block vhca_nb;
+	struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
+};
+
+static struct mlx5_sf_hwc_table *
+mlx5_sf_controller_to_hwc(struct mlx5_core_dev *dev, u32 controller)
 {
-	return sw_id + mlx5_sf_start_function_id(dev);
+	int idx = !!controller;
+
+	return &dev->priv.sf_hw_table->hwc[idx];
 }
 
-static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id)
+u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id)
 {
-	return hw_id - mlx5_sf_start_function_id(dev);
+	struct mlx5_sf_hwc_table *hwc;
+
+	hwc = mlx5_sf_controller_to_hwc(dev, controller);
+	return hwc->start_fn_id + sw_id;
 }
 
-int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
+static u16 mlx5_sf_hw_to_sw_id(struct mlx5_sf_hwc_table *hwc, u16 hw_id)
+{
+	return hw_id - hwc->start_fn_id;
+}
+
+static struct mlx5_sf_hwc_table *
+mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
 {
-	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
-	int sw_id = -ENOSPC;
-	u16 hw_fn_id;
-	int err;
 	int i;
 
-	if (!table->max_local_functions)
+	for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {
+		if (table->hwc[i].max_fn &&
+		    fn_id >= table->hwc[i].start_fn_id &&
+		    fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))
+			return &table->hwc[i];
+	}
+	return NULL;
+}
+
+static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
+				     u32 usr_sfnum)
+{
+	struct mlx5_sf_hwc_table *hwc;
+	int i;
+
+	hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+	if (!hwc->sfs)
 		return -ENOSPC;
 
+	/* Check if sf with same sfnum already exists or not. */
+	for (i = 0; i < hwc->max_fn; i++) {
+		if (hwc->sfs[i].allocated && hwc->sfs[i].usr_sfnum == usr_sfnum)
+			return -EEXIST;
+	}
+	/* Find the free entry and allocate the entry from the array */
+	for (i = 0; i < hwc->max_fn; i++) {
+		if (!hwc->sfs[i].allocated) {
+			hwc->sfs[i].usr_sfnum = usr_sfnum;
+			hwc->sfs[i].allocated = true;
+			return i;
+		}
+	}
+	return -ENOSPC;
+}
+
+static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
+{
+	struct mlx5_sf_hwc_table *hwc;
+
+	hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+	hwc->sfs[id].allocated = false;
+	hwc->sfs[id].pending_delete = false;
+}
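Note (annotation, not part of the patch): two lookups drive the split table: controller number to sub-table (index 0 for local, 1 for any external controller, hence the !!controller trick) and function id to owning sub-table by range check. A self-contained sketch with illustrative sizes and base ids:

	#include <stdint.h>
	#include <stdio.h>

	struct hwc { int max_fn; uint16_t start_fn_id; };

	int main(void)
	{
		struct hwc hwc[2] = {
			{ .max_fn = 64,  .start_fn_id = 100 },  /* [0]: local */
			{ .max_fn = 128, .start_fn_id = 1000 }, /* [1]: external */
		};
		uint32_t controllers[] = { 0, 1, 7 };
		uint16_t fn_ids[] = { 120, 1050, 5000 };
		int i, j;

		/* controller 0 selects the local table; every non-zero
		 * controller shares the external one */
		for (i = 0; i < 3; i++)
			printf("controller %u -> hwc[%d]\n", controllers[i],
			       !!controllers[i]);

		/* a function id belongs to the sub-table whose
		 * [start_fn_id, start_fn_id + max_fn) range contains it */
		for (i = 0; i < 3; i++) {
			int owner = -1;

			for (j = 0; j < 2; j++)
				if (hwc[j].max_fn && fn_ids[i] >= hwc[j].start_fn_id &&
				    fn_ids[i] < hwc[j].start_fn_id + hwc[j].max_fn)
					owner = j;
			printf("fn %d -> %s\n", fn_ids[i],
			       owner < 0 ? "no table" : (owner ? "external" : "local"));
		}
		return 0;
	}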
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum)
+{
+	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+	u16 hw_fn_id;
+	int sw_id;
+	int err;
+
+	if (!table)
+		return -EOPNOTSUPP;
+
 	mutex_lock(&table->table_lock);
-	/* Check if sf with same sfnum already exists or not. */
-	for (i = 0; i < table->max_local_functions; i++) {
-		if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) {
-			err = -EEXIST;
-			goto exist_err;
-		}
-	}
-
-	/* Find the free entry and allocate the entry from the array */
-	for (i = 0; i < table->max_local_functions; i++) {
-		if (!table->sfs[i].allocated) {
-			table->sfs[i].usr_sfnum = usr_sfnum;
-			table->sfs[i].allocated = true;
-			sw_id = i;
-			break;
-		}
-	}
-	if (sw_id == -ENOSPC) {
-		err = -ENOSPC;
+	sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
+	if (sw_id < 0) {
+		err = sw_id;
 		goto exist_err;
 	}
 
-	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sw_id);
+	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, sw_id);
 	err = mlx5_cmd_alloc_sf(dev, hw_fn_id);
 	if (err)
 		goto err;

@@ -76,47 +131,58 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
 	if (err)
 		goto vhca_err;
 
+	if (controller) {
+		/* If this SF is for external controller, SF manager
+		 * needs to arm firmware to receive the events.
+		 */
+		err = mlx5_vhca_event_arm(dev, hw_fn_id);
+		if (err)
+			goto vhca_err;
+	}
+
 	mutex_unlock(&table->table_lock);
 	return sw_id;
 
 vhca_err:
 	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
 err:
-	table->sfs[i].allocated = false;
+	mlx5_sf_hw_table_id_free(table, controller, sw_id);
 exist_err:
 	mutex_unlock(&table->table_lock);
 	return err;
 }
 
-static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id)
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
 {
 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
 	u16 hw_fn_id;
 
-	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
-	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
-	table->sfs[id].allocated = false;
-	table->sfs[id].pending_delete = false;
-}
-
-void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id)
-{
-	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
-
 	mutex_lock(&table->table_lock);
-	_mlx5_sf_hw_id_free(dev, id);
+	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
+	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
+	mlx5_sf_hw_table_id_free(table, controller, id);
 	mutex_unlock(&table->table_lock);
 }
 
-void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
+static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
+					 struct mlx5_sf_hwc_table *hwc, int idx)
+{
+	mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
+	hwc->sfs[idx].allocated = false;
+	hwc->sfs[idx].pending_delete = false;
+}
+
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
 {
 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
 	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+	struct mlx5_sf_hwc_table *hwc;
 	u16 hw_fn_id;
 	u8 state;
 	int err;
 
-	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
+	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
+	hwc = mlx5_sf_controller_to_hwc(dev, controller);
 	mutex_lock(&table->table_lock);
 	err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
 	if (err)
@@ -124,53 +190,102 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
 	state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
 	if (state == MLX5_VHCA_STATE_ALLOCATED) {
 		mlx5_cmd_dealloc_sf(dev, hw_fn_id);
-		table->sfs[id].allocated = false;
+		hwc->sfs[id].allocated = false;
 	} else {
-		table->sfs[id].pending_delete = true;
+		hwc->sfs[id].pending_delete = true;
 	}
 err:
 	mutex_unlock(&table->table_lock);
 }
 
-static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table)
+static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
+					     struct mlx5_sf_hwc_table *hwc)
 {
 	int i;
 
-	for (i = 0; i < table->max_local_functions; i++) {
-		if (table->sfs[i].allocated)
-			_mlx5_sf_hw_id_free(table->dev, i);
+	for (i = 0; i < hwc->max_fn; i++) {
+		if (hwc->sfs[i].allocated)
+			mlx5_sf_hw_table_hwc_sf_free(dev, hwc, i);
 	}
 }
 
+static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
+{
+	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
+	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
+}
+
+static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
+{
+	struct mlx5_sf_hw *sfs;
+
+	if (!max_fn)
+		return 0;
+
+	sfs = kcalloc(max_fn, sizeof(*sfs), GFP_KERNEL);
+	if (!sfs)
+		return -ENOMEM;
+
+	hwc->sfs = sfs;
+	hwc->max_fn = max_fn;
+	hwc->start_fn_id = base_id;
+	return 0;
+}
+
+static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
+{
+	kfree(hwc->sfs);
+}
+
 int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
 {
 	struct mlx5_sf_hw_table *table;
-	struct mlx5_sf_hw *sfs;
-	int max_functions;
+	u16 max_ext_fn = 0;
+	u16 ext_base_id;
+	u16 max_fn = 0;
+	u16 base_id;
 	int err;
 
-	if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev))
+	if (!mlx5_vhca_event_supported(dev))
 		return 0;
 
+	if (mlx5_sf_supported(dev))
+		max_fn = mlx5_sf_max_functions(dev);
+
+	err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
+	if (err)
+		return err;
+
+	if (!max_fn && !max_ext_fn)
+		return 0;
+
-	max_functions = mlx5_sf_max_functions(dev);
 	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (!table)
 		return -ENOMEM;
 
-	sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL);
-	if (!sfs)
-		goto table_err;
-
 	mutex_init(&table->table_lock);
 	table->dev = dev;
-	table->sfs = sfs;
-	table->max_local_functions = max_functions;
 	dev->priv.sf_hw_table = table;
-	mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
+
+	base_id = mlx5_sf_start_function_id(dev);
+	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id);
+	if (err)
+		goto table_err;
+
+	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL],
+					max_ext_fn, ext_base_id);
+	if (err)
+		goto ext_err;
+
+	mlx5_core_dbg(dev, "SF HW table: max sfs = %d, ext sfs = %d\n", max_fn, max_ext_fn);
 	return 0;
 
+ext_err:
+	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
 table_err:
+	mutex_destroy(&table->table_lock);
 	kfree(table);
-	return -ENOMEM;
+	return err;
 }
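Note (annotation, not part of the patch): a userspace sketch of the init flow just shown: each sub-table gets its own entry array, size, and starting function id, and either range may be absent. The constants stand in for the mlx5_sf_max_functions() / mlx5_sf_start_function_id() and mlx5_esw_sf_max_hpf_functions() queries:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct sf_hw { uint32_t usr_sfnum; int allocated; };
	struct hwc { struct sf_hw *sfs; int max_fn; uint16_t start_fn_id; };

	/* A range with max_fn == 0 simply stays empty; later allocations in
	 * it fail with "no space", mirroring the driver's behavior. */
	static int hwc_init(struct hwc *hwc, uint16_t max_fn, uint16_t base_id)
	{
		if (!max_fn)
			return 0;
		hwc->sfs = calloc(max_fn, sizeof(*hwc->sfs));
		if (!hwc->sfs)
			return -1;
		hwc->max_fn = max_fn;
		hwc->start_fn_id = base_id;
		return 0;
	}

	int main(void)
	{
		struct hwc hwc[2] = { { 0 }, { 0 } };

		if (hwc_init(&hwc[0], 64, 100) || hwc_init(&hwc[1], 128, 1000))
			return 1;
		printf("local:    %d fns from %d\n", hwc[0].max_fn, hwc[0].start_fn_id);
		printf("external: %d fns from %d\n", hwc[1].max_fn, hwc[1].start_fn_id);
		free(hwc[0].sfs);
		free(hwc[1].sfs);
		return 0;
	}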
 void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
@@ -181,7 +296,8 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
 		return;
 
 	mutex_destroy(&table->table_lock);
-	kfree(table->sfs);
+	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
+	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
 	kfree(table);
 }
 
@@ -189,21 +305,26 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode
 {
 	struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
 	const struct mlx5_vhca_state_event *event = data;
+	struct mlx5_sf_hwc_table *hwc;
 	struct mlx5_sf_hw *sf_hw;
 	u16 sw_id;
 
 	if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
 		return 0;
 
-	sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id);
-	sf_hw = &table->sfs[sw_id];
+	hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
+	if (!hwc)
+		return 0;
+
+	sw_id = mlx5_sf_hw_to_sw_id(hwc, event->function_id);
+	sf_hw = &hwc->sfs[sw_id];
 
 	mutex_lock(&table->table_lock);
 	/* SF driver notified through firmware that SF is finally detached.
 	 * Hence recycle the sf hardware id for reuse.
 	 */
 	if (sf_hw->allocated && sf_hw->pending_delete)
-		_mlx5_sf_hw_id_free(table->dev, sw_id);
+		mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
 	mutex_unlock(&table->table_lock);
 	return 0;
 }
@@ -228,5 +349,10 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
 
 	mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
 	/* Dealloc SFs whose firmware event has been missed. */
-	mlx5_sf_hw_dealloc_all(table);
+	mlx5_sf_hw_table_dealloc_all(table);
 }
+
+bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
+{
+	return !!dev->priv.sf_hw_table;
+}

@@ -12,10 +12,11 @@ int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id);
 int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
 
-u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id);
+u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id);
 
-int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum);
-void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id);
-void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id);
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum);
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
+bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev);
 
 #endif
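Note (annotation, not part of the patch): with the controller argument threaded through these prototypes, the sw/hw id mapping becomes an offset inside that controller's own range rather than a single device-global offset. A tiny standalone sketch of the arithmetic, with invented base ids:

	#include <stdio.h>

	int main(void)
	{
		unsigned int local_base = 100, ext_base = 1000; /* illustrative */
		unsigned int sw_id = 3;

		/* sw -> hw: add the owning range's start */
		printf("controller 0: sw %u -> hw %u\n", sw_id, local_base + sw_id);
		printf("controller 1: sw %u -> hw %u\n", sw_id, ext_base + sw_id);
		/* hw -> sw: subtract the owning range's start */
		printf("external hw 1003 -> sw %u\n", 1003 - ext_base);
		return 0;
	}

This is what lets a local SF and an external SF reuse the same software id without colliding on a hardware function id.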
@@ -1151,20 +1151,6 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
 
-/**
- * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
- *
- * @dev: Pointer to core device
- *
- * mlx5_eswitch_get_total_vports returns total number of vports for
- * the eswitch.
- */
-u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
-{
-	return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev);
-}
-EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
-
 int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
 {
 	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);

@@ -65,8 +65,6 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
 				    struct mlx5_eswitch_rep *rep, u32 sqn);
 
-u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
-
 #ifdef CONFIG_MLX5_ESWITCH
 enum devlink_eswitch_encap_mode
 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
@@ -126,6 +124,8 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
 #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
 
 u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
 
 #else /* CONFIG_MLX5_ESWITCH */
 
 static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
@@ -162,10 +162,17 @@ mlx5_eswitch_get_vport_metadata_mask(void)
 {
 	return 0;
 }
+
+static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+	return 0;
+}
+
 #endif /* CONFIG_MLX5_ESWITCH */
 
 static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
 {
 	return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
 }
 
 #endif
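Note (annotation, not part of the patch): moving the prototype under CONFIG_MLX5_ESWITCH and adding a static inline stub that returns 0 keeps every caller free of ifdefs. A generic sketch of that pattern, unrelated to any mlx5 symbol; compile with or without -DHAVE_FEATURE:

	#include <stdio.h>

	#ifdef HAVE_FEATURE
	static unsigned short get_total_vports(void)
	{
		return 3; /* stands in for the real query */
	}
	#else
	static inline unsigned short get_total_vports(void)
	{
		return 0; /* stub: feature compiled out */
	}
	#endif

	int main(void)
	{
		/* same call site either way; no #ifdef at the caller */
		printf("total vports: %u\n", get_total_vports());
		return 0;
	}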
@@ -36,14 +36,6 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/device.h>
 
-#define MLX5_VPORT_PF_PLACEHOLDER		(1u)
-#define MLX5_VPORT_UPLINK_PLACEHOLDER		(1u)
-#define MLX5_VPORT_ECPF_PLACEHOLDER(mdev)	(mlx5_ecpf_vport_exists(mdev))
-
-#define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER +		\
-				   MLX5_VPORT_UPLINK_PLACEHOLDER +	\
-				   MLX5_VPORT_ECPF_PLACEHOLDER(mdev))
-
 #define MLX5_VPORT_MANAGER(mdev)	\
 	(MLX5_CAP_GEN(mdev, vport_group_manager) && \
 	 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \

@@ -98,11 +98,13 @@ struct devlink_port_pci_vf_attrs {
 * @controller: Associated controller number
 * @sf: Associated PCI SF for of the PCI PF for this port.
 * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates if a port is for an external controller
 */
 struct devlink_port_pci_sf_attrs {
 	u32 controller;
 	u32 sf;
 	u16 pf;
+	u8 external:1;
 };
 
 /**
@@ -1508,7 +1510,8 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 contro
 void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
 				   u16 pf, u16 vf, bool external);
 void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
-				   u32 controller, u16 pf, u32 sf);
+				   u32 controller, u16 pf, u32 sf,
+				   bool external);
 int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
 			u32 size, u16 ingress_pools_count,
 			u16 egress_pools_count, u16 ingress_tc_count,
@@ -8599,9 +8599,10 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
 * @controller: associated controller number for the devlink port instance
 * @pf: associated PF for the devlink port instance
 * @sf: associated SF of a PF for the devlink port instance
+ * @external: indicates if the port is for an external controller
 */
 void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller,
-				   u16 pf, u32 sf)
+				   u16 pf, u32 sf, bool external)
 {
 	struct devlink_port_attrs *attrs = &devlink_port->attrs;
 	int ret;
@@ -8615,6 +8616,7 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 contro
 	attrs->pci_sf.controller = controller;
 	attrs->pci_sf.pf = pf;
 	attrs->pci_sf.sf = sf;
+	attrs->pci_sf.external = external;
 }
 EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
 
@@ -8667,6 +8669,13 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
 			     attrs->pci_vf.pf, attrs->pci_vf.vf);
 		break;
 	case DEVLINK_PORT_FLAVOUR_PCI_SF:
+		if (attrs->pci_sf.external) {
+			n = snprintf(name, len, "c%u", attrs->pci_sf.controller);
+			if (n >= len)
+				return -EINVAL;
+			len -= n;
+			name += n;
+		}
 		n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
 			     attrs->pci_sf.sf);
 		break;
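Note (annotation, not part of the patch): a userspace sketch of the phys_port_name construction above: external SF ports gain a "c<controller>" prefix ahead of "pf<pf>sf<sf>", reproducing the cover letter's c1pf0sf77 while local SF ports keep their unprefixed name:

	#include <stdio.h>

	static int sf_port_name(char *name, size_t len, unsigned int controller,
				unsigned int pf, unsigned int sf, int external)
	{
		int n = 0;

		if (external) {
			/* controller prefix only for external ports */
			n = snprintf(name, len, "c%u", controller);
			if (n < 0 || (size_t)n >= len)
				return -1;
		}
		return snprintf(name + n, len - n, "pf%usf%u", pf, sf) < 0 ? -1 : 0;
	}

	int main(void)
	{
		char name[32];

		sf_port_name(name, sizeof(name), 1, 0, 77, 1);
		printf("%s\n", name); /* c1pf0sf77 */
		sf_port_name(name, sizeof(name), 0, 0, 77, 0);
		printf("%s\n", name); /* pf0sf77 */
		return 0;
	}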