Merge tag 'mlx5-updates-2023-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-08-22

1) Patches #1..#13, from Jiri:

The goal of this patchset is to make the SF code cleaner.

Benefit from the previously introduced devlink_port struct containerization
to avoid unnecessary lookups in the devlink port ops.

Also, benefit from the devlink locking changes and avoid unnecessary
reference counting.
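
For reference, a minimal sketch of the containerization pattern (mirroring
the mlx5_devlink_port helpers added to eswitch.h in the diffs below): the
driver embeds struct devlink_port inside its own struct and recovers the
outer struct with container_of(), so a devlink port op no longer needs a
port-index-to-vport lookup.

    /* sketch of the pattern; matches the helpers introduced in eswitch.h */
    struct mlx5_devlink_port {
            struct devlink_port dl_port;  /* embedded, registered with devlink */
            struct mlx5_vport *vport;     /* back-pointer filled at init time */
    };

    static inline struct mlx5_vport *
    mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
    {
            /* one container_of() instead of an index lookup per op */
            return container_of(dl_port, struct mlx5_devlink_port, dl_port)->vport;
    }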

2) Patches #14,#15:

Add the ability to configure both UDP and TCP upper-protocol selectors, in
both the RX and TX directions.
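
Concretely, the offload validation is relaxed from "UDP, and only in the Tx
direction" to accepting an empty, UDP, or TCP upper-protocol selector in
either direction. A condensed sketch of the new check (the helper name is
hypothetical; the in-tree code open-codes it in mlx5e_xfrm_validate_state()
and mlx5e_xfrm_validate_policy(), see the first diff below):

    /* hypothetical condensation of the relaxed selector check */
    static bool upper_proto_offloadable(u8 proto)
    {
            return proto == IPPROTO_IP ||   /* no upper-protocol match */
                   proto == IPPROTO_UDP ||
                   proto == IPPROTO_TCP;
    }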

* tag 'mlx5-updates-2023-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Support IPsec upper TCP protocol selector
  net/mlx5e: Support IPsec upper protocol selector field offload for RX
  net/mlx5: Store vport in struct mlx5_devlink_port and use it in port ops
  net/mlx5: Check vhca_resource_manager capability in each op and add extack msg
  net/mlx5: Relax mlx5_devlink_eswitch_get() return value checking
  net/mlx5: Return -EOPNOTSUPP in mlx5_devlink_port_fn_migratable_set() directly
  net/mlx5: Reduce number of vport lookups passing vport pointer instead of index
  net/mlx5: Embed struct devlink_port into driver structure
  net/mlx5: Don't register ops for non-PF/VF/SF port and avoid checks in ops
  net/mlx5: Remove no longer used mlx5_esw_offloads_sf_vport_enable/disable()
  net/mlx5: Introduce mlx5_eswitch_load/unload_sf_vport() and use it from SF code
  net/mlx5: Allow mlx5_esw_offloads_devlink_port_register() to register SFs
  net/mlx5: Push devlink port PF/VF init/cleanup calls out of devlink_port_register/unregister()
  net/mlx5: Push out SF devlink port init and cleanup code to separate helpers
  net/mlx5: Rework devlink port alloc/free into init/cleanup
====================

Link: https://lore.kernel.org/all/20230823051012.162483-1-saeed@kernel.org/
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

commit 9f6708a668 (committed by Paolo Abeni, 2023-08-24 15:23:49 +02:00)
7 changed files with 373 additions and 305 deletions

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -440,9 +440,9 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
         return -EINVAL;
     }
-    if (x->sel.proto != IPPROTO_IP &&
-        (x->sel.proto != IPPROTO_UDP || x->xso.dir != XFRM_DEV_OFFLOAD_OUT)) {
-        NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
+    if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
+        x->sel.proto != IPPROTO_TCP) {
+        NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
         return -EINVAL;
     }
@@ -983,9 +983,10 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
         return -EINVAL;
     }
-    if (sel->proto != IPPROTO_IP &&
-        (sel->proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
-        NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
+    if (x->selector.proto != IPPROTO_IP &&
+        x->selector.proto != IPPROTO_UDP &&
+        x->selector.proto != IPPROTO_TCP) {
+        NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
         return -EINVAL;
     }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c

@@ -936,23 +936,42 @@ static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)

 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
 {
-    if (upspec->proto != IPPROTO_UDP)
+    switch (upspec->proto) {
+    case IPPROTO_UDP:
+        if (upspec->dport) {
+            MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+                     udp_dport, upspec->dport_mask);
+            MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+                     udp_dport, upspec->dport);
+        }
+        if (upspec->sport) {
+            MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+                     udp_sport, upspec->sport_mask);
+            MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+                     udp_sport, upspec->sport);
+        }
+        break;
+    case IPPROTO_TCP:
+        if (upspec->dport) {
+            MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+                     tcp_dport, upspec->dport_mask);
+            MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+                     tcp_dport, upspec->dport);
+        }
+        if (upspec->sport) {
+            MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+                     tcp_sport, upspec->sport_mask);
+            MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+                     tcp_sport, upspec->sport);
+        }
+        break;
+    default:
         return;
+    }

     spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
     MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
     MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
-    if (upspec->dport) {
-        MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
-                 upspec->dport_mask);
-        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->dport);
-    }
-    if (upspec->sport) {
-        MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_sport,
-                 upspec->sport_mask);
-        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_sport, upspec->sport);
-    }
 }

 static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
@@ -1243,6 +1262,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
     setup_fte_spi(spec, attrs->spi);
     setup_fte_esp(spec);
     setup_fte_no_frags(spec);
+    setup_fte_upper_proto_match(spec, &attrs->upspec);

     if (rx != ipsec->rx_esw)
         err = setup_modify_header(ipsec, attrs->type,
@@ -1519,6 +1539,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
     setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
     setup_fte_no_frags(spec);
+    setup_fte_upper_proto_match(spec, &attrs->upspec);

     switch (attrs->action) {
     case XFRM_POLICY_ALLOW:

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c

@@ -21,19 +21,16 @@ static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
            mlx5_core_is_ec_vf_vport(esw->dev, vport_num);
 }

-static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
+                                                           u16 vport_num,
+                                                           struct devlink_port *dl_port)
 {
     struct mlx5_core_dev *dev = esw->dev;
     struct netdev_phys_item_id ppid = {};
-    struct devlink_port *dl_port;
     u32 controller_num = 0;
     bool external;
     u16 pfnum;

-    dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
-    if (!dl_port)
-        return NULL;
-
     mlx5_esw_get_port_parent_id(dev, &ppid);
     pfnum = mlx5_get_dev_index(dev);
     external = mlx5_core_is_ecpf_esw_manager(dev);
@@ -55,12 +52,37 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
         devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
                                       vport_num - 1, false);
     }
-    return dl_port;
 }

-static void mlx5_esw_dl_port_free(struct devlink_port *dl_port)
+int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
+                                              struct mlx5_vport *vport)
 {
-    kfree(dl_port);
+    struct mlx5_devlink_port *dl_port;
+    u16 vport_num = vport->vport;
+
+    if (!mlx5_esw_devlink_port_supported(esw, vport_num))
+        return 0;
+
+    dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
+    if (!dl_port)
+        return -ENOMEM;
+
+    mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(esw, vport_num,
+                                                   &dl_port->dl_port);
+
+    vport->dl_port = dl_port;
+    mlx5_devlink_port_init(dl_port, vport);
+    return 0;
+}
+
+void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
+                                                  struct mlx5_vport *vport)
+{
+    if (!vport->dl_port)
+        return;
+
+    kfree(vport->dl_port);
+    vport->dl_port = NULL;
 }

 static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
@@ -72,74 +94,37 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
     .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
 };

-int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
+                                                        struct devlink_port *dl_port,
+                                                        u32 controller, u32 sfnum)
 {
     struct mlx5_core_dev *dev = esw->dev;
-    struct devlink_port *dl_port;
-    unsigned int dl_port_index;
-    struct mlx5_vport *vport;
-    struct devlink *devlink;
-    int err;
-
-    if (!mlx5_esw_devlink_port_supported(esw, vport_num))
-        return 0;
-
-    vport = mlx5_eswitch_get_vport(esw, vport_num);
-    if (IS_ERR(vport))
-        return PTR_ERR(vport);
-
-    dl_port = mlx5_esw_dl_port_alloc(esw, vport_num);
-    if (!dl_port)
-        return -ENOMEM;
-
-    devlink = priv_to_devlink(dev);
-    dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
-    err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
-                                      &mlx5_esw_pf_vf_dl_port_ops);
-    if (err)
-        goto reg_err;
-
-    err = devl_rate_leaf_create(dl_port, vport, NULL);
-    if (err)
-        goto rate_err;
+    struct netdev_phys_item_id ppid = {};
+    u16 pfnum;

+    pfnum = mlx5_get_dev_index(dev);
+    mlx5_esw_get_port_parent_id(dev, &ppid);
+    memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
+    dl_port->attrs.switch_id.id_len = ppid.id_len;
+    devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
+}
+
+int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+                                           struct mlx5_devlink_port *dl_port,
+                                           u32 controller, u32 sfnum)
+{
+    mlx5_esw_offloads_sf_devlink_port_attrs_set(esw, &dl_port->dl_port, controller, sfnum);
     vport->dl_port = dl_port;
+    mlx5_devlink_port_init(dl_port, vport);
     return 0;
-
-rate_err:
-    devl_port_unregister(dl_port);
-reg_err:
-    mlx5_esw_dl_port_free(dl_port);
-    return err;
 }

-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
-    struct mlx5_vport *vport;
-
-    if (!mlx5_esw_devlink_port_supported(esw, vport_num))
-        return;
-
-    vport = mlx5_eswitch_get_vport(esw, vport_num);
-    if (IS_ERR(vport))
-        return;
-
-    mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
-    devl_rate_leaf_destroy(vport->dl_port);
-    devl_port_unregister(vport->dl_port);
-    mlx5_esw_dl_port_free(vport->dl_port);
     vport->dl_port = NULL;
 }

-struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
-{
-    struct mlx5_vport *vport;
-
-    vport = mlx5_eswitch_get_vport(esw, vport_num);
-    return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port;
-}
-
 static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
 #ifdef CONFIG_MLX5_SF_MANAGER
     .port_del = mlx5_devlink_sf_port_del,
@@ -154,56 +139,62 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
 #endif
 };

-int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-                                      u16 vport_num, u32 controller, u32 sfnum)
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
     struct mlx5_core_dev *dev = esw->dev;
-    struct netdev_phys_item_id ppid = {};
+    const struct devlink_port_ops *ops;
+    struct mlx5_devlink_port *dl_port;
+    u16 vport_num = vport->vport;
     unsigned int dl_port_index;
-    struct mlx5_vport *vport;
     struct devlink *devlink;
-    u16 pfnum;
     int err;

-    vport = mlx5_eswitch_get_vport(esw, vport_num);
-    if (IS_ERR(vport))
-        return PTR_ERR(vport);
+    dl_port = vport->dl_port;
+    if (!dl_port)
+        return 0;
+
+    if (mlx5_esw_is_sf_vport(esw, vport_num))
+        ops = &mlx5_esw_dl_sf_port_ops;
+    else if (mlx5_eswitch_is_pf_vf_vport(esw, vport_num))
+        ops = &mlx5_esw_pf_vf_dl_port_ops;
+    else
+        ops = NULL;

-    pfnum = mlx5_get_dev_index(dev);
-    mlx5_esw_get_port_parent_id(dev, &ppid);
-    memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
-    dl_port->attrs.switch_id.id_len = ppid.id_len;
-    devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
     devlink = priv_to_devlink(dev);
     dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
-    err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
-                                      &mlx5_esw_dl_sf_port_ops);
+    err = devl_port_register_with_ops(devlink, &dl_port->dl_port, dl_port_index, ops);
     if (err)
         return err;

-    err = devl_rate_leaf_create(dl_port, vport, NULL);
+    err = devl_rate_leaf_create(&dl_port->dl_port, vport, NULL);
     if (err)
         goto rate_err;

-    vport->dl_port = dl_port;
     return 0;

 rate_err:
-    devl_port_unregister(dl_port);
+    devl_port_unregister(&dl_port->dl_port);
     return err;
 }

-void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
-    struct mlx5_vport *vport;
+    struct mlx5_devlink_port *dl_port;

-    vport = mlx5_eswitch_get_vport(esw, vport_num);
-    if (IS_ERR(vport))
+    if (!vport->dl_port)
         return;
+    dl_port = vport->dl_port;

     mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
-    devl_rate_leaf_destroy(vport->dl_port);
-    devl_port_unregister(vport->dl_port);
-    vport->dl_port = NULL;
+    devl_rate_leaf_destroy(&dl_port->dl_port);
+    devl_port_unregister(&dl_port->dl_port);
+}
+
+struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
+{
+    struct mlx5_vport *vport;
+
+    vport = mlx5_eswitch_get_vport(esw, vport_num);
+    return IS_ERR(vport) ? ERR_CAST(vport) : &vport->dl_port->dl_port;
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

@@ -77,18 +77,31 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
     return 0;
 }

-struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
+static struct mlx5_eswitch *__mlx5_devlink_eswitch_get(struct devlink *devlink, bool check)
 {
     struct mlx5_core_dev *dev = devlink_priv(devlink);
     int err;

-    err = mlx5_eswitch_check(dev);
-    if (err)
-        return ERR_PTR(err);
+    if (check) {
+        err = mlx5_eswitch_check(dev);
+        if (err)
+            return ERR_PTR(err);
+    }

     return dev->priv.eswitch;
 }

+struct mlx5_eswitch *__must_check
+mlx5_devlink_eswitch_get(struct devlink *devlink)
+{
+    return __mlx5_devlink_eswitch_get(devlink, true);
+}
+
+struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink)
+{
+    return __mlx5_devlink_eswitch_get(devlink, false);
+}
+
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
 {
@@ -882,16 +895,12 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
     esw_vport_cleanup_acl(esw, vport);
 }

-int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                           enum mlx5_eswitch_vport_event enabled_events)
 {
-    struct mlx5_vport *vport;
+    u16 vport_num = vport->vport;
     int ret;

-    vport = mlx5_eswitch_get_vport(esw, vport_num);
-    if (IS_ERR(vport))
-        return PTR_ERR(vport);
-
     mutex_lock(&esw->state_lock);
     WARN_ON(vport->enabled);
@@ -912,7 +921,7 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
         (!vport_num && mlx5_core_is_ecpf(esw->dev)))
         vport->info.trusted = true;

-    if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+    if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
         MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
         ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
         if (ret)
@@ -939,15 +948,12 @@ err_vhca_mapping:
     return ret;
 }

-void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
-    struct mlx5_vport *vport;
-
-    vport = mlx5_eswitch_get_vport(esw, vport_num);
-    if (IS_ERR(vport))
-        return;
+    u16 vport_num = vport->vport;

     mutex_lock(&esw->state_lock);
     if (!vport->enabled)
         goto done;
@@ -957,9 +963,9 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)

     /* Disable events from this vport */
     if (MLX5_CAP_GEN(esw->dev, log_max_l2_table))
-        arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
+        arm_vport_context_events_cmd(esw->dev, vport_num, 0);

-    if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+    if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
         MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
         mlx5_esw_vport_vhca_id_clear(esw, vport_num);
@@ -1068,30 +1074,104 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
     }
 }

-static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
+static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                                    enum mlx5_eswitch_vport_event enabled_events)
 {
     int err;

-    err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
+    err = mlx5_esw_vport_enable(esw, vport, enabled_events);
     if (err)
         return err;

-    err = mlx5_esw_offloads_load_rep(esw, vport_num);
+    err = mlx5_esw_offloads_load_rep(esw, vport);
     if (err)
         goto err_rep;

     return err;

 err_rep:
-    mlx5_esw_vport_disable(esw, vport_num);
+    mlx5_esw_vport_disable(esw, vport);
     return err;
 }

-static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
-    mlx5_esw_offloads_unload_rep(esw, vport_num);
-    mlx5_esw_vport_disable(esw, vport_num);
+    mlx5_esw_offloads_unload_rep(esw, vport);
+    mlx5_esw_vport_disable(esw, vport);
+}
+
+static int mlx5_eswitch_load_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num,
+                                         enum mlx5_eswitch_vport_event enabled_events)
+{
+    struct mlx5_vport *vport;
+    int err;
+
+    vport = mlx5_eswitch_get_vport(esw, vport_num);
+    if (IS_ERR(vport))
+        return PTR_ERR(vport);
+
+    err = mlx5_esw_offloads_init_pf_vf_rep(esw, vport);
+    if (err)
+        return err;
+
+    err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
+    if (err)
+        goto err_load;
+    return 0;
+
+err_load:
+    mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
+    return err;
+}
+
+static void mlx5_eswitch_unload_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+    struct mlx5_vport *vport;
+
+    vport = mlx5_eswitch_get_vport(esw, vport_num);
+    if (IS_ERR(vport))
+        return;
+
+    mlx5_eswitch_unload_vport(esw, vport);
+    mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
+}
+
+int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
+                               enum mlx5_eswitch_vport_event enabled_events,
+                               struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum)
+{
+    struct mlx5_vport *vport;
+    int err;
+
+    vport = mlx5_eswitch_get_vport(esw, vport_num);
+    if (IS_ERR(vport))
+        return PTR_ERR(vport);
+
+    err = mlx5_esw_offloads_init_sf_rep(esw, vport, dl_port, controller, sfnum);
+    if (err)
+        return err;
+
+    err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
+    if (err)
+        goto err_load;
+
+    return 0;
+
+err_load:
+    mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
+    return err;
+}
+
+void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+    struct mlx5_vport *vport;
+
+    vport = mlx5_eswitch_get_vport(esw, vport_num);
+    if (IS_ERR(vport))
+        return;
+
+    mlx5_eswitch_unload_vport(esw, vport);
+    mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
 }

 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
@@ -1102,7 +1182,7 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
     mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
         if (!vport->enabled)
             continue;
-        mlx5_eswitch_unload_vport(esw, vport->vport);
+        mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
     }
 }
@@ -1115,7 +1195,7 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
     mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
         if (!vport->enabled)
             continue;
-        mlx5_eswitch_unload_vport(esw, vport->vport);
+        mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
     }
 }
@@ -1127,7 +1207,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
     int err;

     mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
-        err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
+        err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
         if (err)
             goto vf_err;
     }
@@ -1147,7 +1227,7 @@ static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_vfs,
     int err;

     mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
-        err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
+        err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
         if (err)
             goto vf_err;
     }
@@ -1189,7 +1269,7 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
     int ret;

     /* Enable PF vport */
-    ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
+    ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, enabled_events);
     if (ret)
         return ret;
@@ -1200,7 +1280,7 @@
     /* Enable ECPF vport */
     if (mlx5_ecpf_vport_exists(esw->dev)) {
-        ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
+        ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
         if (ret)
             goto ecpf_err;
         if (mlx5_core_ec_sriov_enabled(esw->dev)) {
@@ -1223,11 +1303,11 @@ vf_err:
     mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
 ec_vf_err:
     if (mlx5_ecpf_vport_exists(esw->dev))
-        mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+        mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
 ecpf_err:
     host_pf_disable_hca(esw->dev);
 pf_hca_err:
-    mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
+    mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
     return ret;
 }
@@ -1241,11 +1321,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
     if (mlx5_ecpf_vport_exists(esw->dev)) {
         if (mlx5_core_ec_sriov_enabled(esw->dev))
             mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
-        mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+        mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
     }

     host_pf_disable_hca(esw->dev);
-    mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
+    mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
 }

 static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
@@ -1918,6 +1998,12 @@ bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
     return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
 }

+bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+    return vport_num == MLX5_VPORT_PF ||
+           mlx5_eswitch_is_vf_vport(esw, vport_num);
+}
+
 bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
 {
     return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

@@ -172,6 +172,29 @@ enum mlx5_eswitch_vport_event {
     MLX5_VPORT_PROMISC_CHANGE = BIT(3),
 };

+struct mlx5_vport;
+
+struct mlx5_devlink_port {
+    struct devlink_port dl_port;
+    struct mlx5_vport *vport;
+};
+
+static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
+                                          struct mlx5_vport *vport)
+{
+    dl_port->vport = vport;
+}
+
+static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
+{
+    return container_of(dl_port, struct mlx5_devlink_port, dl_port);
+}
+
+static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
+{
+    return mlx5_devlink_port_get(dl_port)->vport;
+}
+
 struct mlx5_vport {
     struct mlx5_core_dev *dev;
     struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -200,7 +223,7 @@ struct mlx5_vport {
     bool enabled;
     enum mlx5_eswitch_vport_event enabled_events;
     int index;
-    struct devlink_port *dl_port;
+    struct mlx5_devlink_port *dl_port;
 };

 struct mlx5_esw_indir_table;
@@ -675,11 +698,16 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
      MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
      (last) - 1)

-struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
+struct mlx5_eswitch *__must_check
+mlx5_devlink_eswitch_get(struct devlink *devlink);
+
+struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);
+
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

 bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
 bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
@@ -689,9 +717,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                  enum mlx5_eswitch_vport_event enabled_events);
 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

-int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                           enum mlx5_eswitch_vport_event enabled_events);
-void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

 int
 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
@@ -729,24 +757,40 @@ void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
                                    u16 vport,
                                    struct mlx5_flow_spec *spec);

-int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+                                  struct mlx5_devlink_port *dl_port,
+                                  u32 controller, u32 sfnum);
+void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
+                               enum mlx5_eswitch_vport_event enabled_events,
+                               struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
+void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
                                 enum mlx5_eswitch_vport_event enabled_events);
 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

-int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
+                                              struct mlx5_vport *vport);
+void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
+                                                  struct mlx5_vport *vport);
+
+int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+                                           struct mlx5_devlink_port *dl_port,
+                                           u32 controller, u32 sfnum);
+void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
 struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

-int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-                                      u16 vport_num, u32 controller, u32 sfnum);
-void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
-
-int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-                                      u16 vport_num, u32 controller, u32 sfnum);
-void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
 int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@@ -2535,35 +2535,63 @@ static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
     __esw_offloads_unload_rep(esw, rep, rep_type);
 }

-int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+    if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+        return 0;
+
+    return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
+}
+
+void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+    if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+        return;
+
+    mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
+}
+
+int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+                                  struct mlx5_devlink_port *dl_port,
+                                  u32 controller, u32 sfnum)
+{
+    return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum);
+}
+
+void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+    mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport);
+}
+
+int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
     int err;

     if (esw->mode != MLX5_ESWITCH_OFFLOADS)
         return 0;

-    err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
+    err = mlx5_esw_offloads_devlink_port_register(esw, vport);
     if (err)
         return err;

-    err = mlx5_esw_offloads_rep_load(esw, vport_num);
+    err = mlx5_esw_offloads_rep_load(esw, vport->vport);
     if (err)
         goto load_err;
     return err;

 load_err:
-    mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+    mlx5_esw_offloads_devlink_port_unregister(esw, vport);
     return err;
 }

-void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
     if (esw->mode != MLX5_ESWITCH_OFFLOADS)
         return;

-    mlx5_esw_offloads_rep_unload(esw, vport_num);
+    mlx5_esw_offloads_rep_unload(esw, vport->vport);

-    mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+    mlx5_esw_offloads_devlink_port_unregister(esw, vport);
 }

 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
@@ -3606,7 +3634,7 @@ static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
     struct net *devl_net, *netdev_net;
     struct mlx5_eswitch *esw;

-    esw = mlx5_devlink_eswitch_get(devlink);
+    esw = mlx5_devlink_eswitch_nocheck_get(devlink);
     netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
     devl_net = devlink_net(devlink);
@@ -4102,38 +4130,6 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);

-int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-                                      u16 vport_num, u32 controller, u32 sfnum)
-{
-    int err;
-
-    err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
-    if (err)
-        return err;
-
-    err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
-    if (err)
-        goto devlink_err;
-
-    err = mlx5_esw_offloads_rep_load(esw, vport_num);
-    if (err)
-        goto rep_err;
-    return 0;
-
-rep_err:
-    mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
-devlink_err:
-    mlx5_esw_vport_disable(esw, vport_num);
-    return err;
-}
-
-void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
-{
-    mlx5_esw_offloads_rep_unload(esw, vport_num);
-    mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
-    mlx5_esw_vport_disable(esw, vport_num);
-}
-
 static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
 {
     int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@@ -4222,35 +4218,12 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);

-static bool
-is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
-{
-    return vport_num == MLX5_VPORT_PF ||
-           mlx5_eswitch_is_vf_vport(esw, vport_num) ||
-           mlx5_esw_is_sf_vport(esw, vport_num);
-}
-
 int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
                                      u8 *hw_addr, int *hw_addr_len,
                                      struct netlink_ext_ack *extack)
 {
-    struct mlx5_eswitch *esw;
-    struct mlx5_vport *vport;
-    u16 vport_num;
-
-    esw = mlx5_devlink_eswitch_get(port->devlink);
-    if (IS_ERR(esw))
-        return PTR_ERR(esw);
-
-    vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
-    if (!is_port_function_supported(esw, vport_num))
-        return -EOPNOTSUPP;
-
-    vport = mlx5_eswitch_get_vport(esw, vport_num);
-    if (IS_ERR(vport)) {
-        NL_SET_ERR_MSG_MOD(extack, "Invalid port");
-        return PTR_ERR(vport);
-    }
+    struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+    struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);

     mutex_lock(&esw->state_lock);
     ether_addr_copy(hw_addr, vport->info.mac);
@@ -4263,58 +4236,26 @@ int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
                                      const u8 *hw_addr, int hw_addr_len,
                                      struct netlink_ext_ack *extack)
 {
-    struct mlx5_eswitch *esw;
-    u16 vport_num;
+    struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+    struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);

-    esw = mlx5_devlink_eswitch_get(port->devlink);
-    if (IS_ERR(esw)) {
-        NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
-        return PTR_ERR(esw);
-    }
-
-    vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
-    if (!is_port_function_supported(esw, vport_num)) {
-        NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
-        return -EINVAL;
-    }
-
-    return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
-}
-
-static struct mlx5_vport *
-mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
-{
-    u16 vport_num;
-
-    if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
-        return ERR_PTR(-EOPNOTSUPP);
-
-    vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
-    if (!is_port_function_supported(esw, vport_num))
-        return ERR_PTR(-EOPNOTSUPP);
-
-    return mlx5_eswitch_get_vport(esw, vport_num);
+    return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr);
 }

 int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
                                         struct netlink_ext_ack *extack)
 {
-    struct mlx5_eswitch *esw;
-    struct mlx5_vport *vport;
-
-    esw = mlx5_devlink_eswitch_get(port->devlink);
-    if (IS_ERR(esw))
-        return PTR_ERR(esw);
+    struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+    struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);

     if (!MLX5_CAP_GEN(esw->dev, migration)) {
         NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
         return -EOPNOTSUPP;
     }

-    vport = mlx5_devlink_port_fn_get_vport(port, esw);
-    if (IS_ERR(vport)) {
-        NL_SET_ERR_MSG_MOD(extack, "Invalid port");
-        return PTR_ERR(vport);
+    if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+        NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+        return -EOPNOTSUPP;
     }

     mutex_lock(&esw->state_lock);
@@ -4326,26 +4267,21 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
 int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
                                         struct netlink_ext_ack *extack)
 {
+    struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+    struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
     int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-    struct mlx5_eswitch *esw;
-    struct mlx5_vport *vport;
     void *query_ctx;
     void *hca_caps;
-    int err = -EOPNOTSUPP;
-
-    esw = mlx5_devlink_eswitch_get(port->devlink);
-    if (IS_ERR(esw))
-        return PTR_ERR(esw);
+    int err;

     if (!MLX5_CAP_GEN(esw->dev, migration)) {
         NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
-        return err;
+        return -EOPNOTSUPP;
     }

-    vport = mlx5_devlink_port_fn_get_vport(port, esw);
-    if (IS_ERR(vport)) {
-        NL_SET_ERR_MSG_MOD(extack, "Invalid port");
-        return PTR_ERR(vport);
+    if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+        NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+        return -EOPNOTSUPP;
     }

     mutex_lock(&esw->state_lock);
@@ -4390,17 +4326,12 @@ out:
 int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
                                   struct netlink_ext_ack *extack)
 {
-    struct mlx5_eswitch *esw;
-    struct mlx5_vport *vport;
-
-    esw = mlx5_devlink_eswitch_get(port->devlink);
-    if (IS_ERR(esw))
-        return PTR_ERR(esw);
+    struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+    struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);

-    vport = mlx5_devlink_port_fn_get_vport(port, esw);
-    if (IS_ERR(vport)) {
-        NL_SET_ERR_MSG_MOD(extack, "Invalid port");
-        return PTR_ERR(vport);
+    if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+        NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+        return -EOPNOTSUPP;
     }

     mutex_lock(&esw->state_lock);
@@ -4412,24 +4343,18 @@ int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
 int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
                                   struct netlink_ext_ack *extack)
 {
+    struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+    struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
     int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-    struct mlx5_eswitch *esw;
-    struct mlx5_vport *vport;
+    u16 vport_num = vport->vport;
     void *query_ctx;
     void *hca_caps;
-    u16 vport_num;
     int err;

-    esw = mlx5_devlink_eswitch_get(port->devlink);
-    if (IS_ERR(esw))
-        return PTR_ERR(esw);
-
-    vport = mlx5_devlink_port_fn_get_vport(port, esw);
-    if (IS_ERR(vport)) {
-        NL_SET_ERR_MSG_MOD(extack, "Invalid port");
-        return PTR_ERR(vport);
+    if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+        NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+        return -EOPNOTSUPP;
     }

-    vport_num = vport->vport;
     mutex_lock(&esw->state_lock);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c

@@ -12,7 +12,7 @@
 #include "diag/sf_tracepoint.h"

 struct mlx5_sf {
-    struct devlink_port dl_port;
+    struct mlx5_devlink_port dl_port;
     unsigned int port_index;
     u32 controller;
     u16 id;
@@ -292,11 +292,11 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
     if (IS_ERR(sf))
         return PTR_ERR(sf);

-    err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
-                                            new_attr->controller, new_attr->sfnum);
+    err = mlx5_eswitch_load_sf_vport(esw, sf->hw_fn_id, MLX5_VPORT_UC_ADDR_CHANGE,
+                                     &sf->dl_port, new_attr->controller, new_attr->sfnum);
     if (err)
         goto esw_err;
-    *dl_port = &sf->dl_port;
+    *dl_port = &sf->dl_port.dl_port;
     trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
     return 0;
@@ -400,7 +400,7 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink,
         goto sf_err;
     }

-    mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+    mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
     mlx5_sf_id_erase(table, sf);

     mutex_lock(&table->sf_state_lock);
@@ -472,7 +472,7 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
      * arrive. It is safe to destroy all user created SFs.
      */
     xa_for_each(&table->port_indices, index, sf) {
-        mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+        mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
         mlx5_sf_id_erase(table, sf);
         mlx5_sf_dealloc(table, sf);
     }