
Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Merge the mlx5-next shared branch into net-next.

From Bodong Wang:
1) Introduction of ECPF (Embedded CPU Physical Function), and low-level
bits for mlx5 SmartNIC capabilities support.
2) Vport enumeration refactoring that affects mlx5_ib and mlx5_core

From Aya Levin:
3) Add support for 50Gbps per lane link modes in the Port Type and Speed
register (PTYS)
4) Refactor low level query functions for PTYS register
5) Add support for 50Gbps per lane link modes to mlx5_ib

Note: due to an API change in mlx5/core and a later patch from net-next,
a fixup was squashed into this merge commit, replacing FDB_UPLINK_VPORT
with MLX5_VPORT_UPLINK, which exists only in upstream net-next.
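
For reference, FDB_UPLINK_VPORT was a private 0xffff define in eswitch.h;
the series replaces it with a well-known vport number in the shared vport
header. A sketch of that enum as introduced by the series (reproduced from
memory, so treat the exact values as an assumption):

enum {
	MLX5_VPORT_PF       = 0x0,    /* host physical function */
	MLX5_VPORT_FIRST_VF = 0x1,    /* first virtual function */
	MLX5_VPORT_ECPF     = 0xfffe, /* embedded CPU physical function */
	MLX5_VPORT_UPLINK   = 0xffff, /* uplink (wire) vport */
};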

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Saeed Mahameed 2019-02-15 15:16:36 -08:00
commit 259fae5a2c
36 changed files with 848 additions and 474 deletions


@ -345,3 +345,40 @@ int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
counter_set_id);
return err;
}
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
int err = -ENOMEM;
void *data;
void *resp;
u32 *out;
u32 *in;
in = kzalloc(inlen, GFP_KERNEL);
out = kzalloc(outlen, GFP_KERNEL);
if (!in || !out)
goto out;
MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
MLX5_SET(mad_ifc_in, in, op_mod, opmod);
MLX5_SET(mad_ifc_in, in, port, port);
data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
goto out;
resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
memcpy(outb, resp,
MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
out:
kfree(out);
kfree(in);
return err;
}


@ -63,4 +63,6 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
#endif /* MLX5_IB_CMD_H */


@ -3,10 +3,11 @@
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*/
#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"
static const struct mlx5_ib_profile rep_profile = {
static const struct mlx5_ib_profile vf_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
@ -45,31 +46,17 @@ static const struct mlx5_ib_profile rep_profile = {
NULL),
};
static int
mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
struct mlx5_ib_dev *ibdev;
ibdev = mlx5_ib_rep_to_dev(rep);
if (!__mlx5_ib_add(ibdev, ibdev->profile))
return -EINVAL;
return 0;
}
static void
mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
struct mlx5_ib_dev *ibdev;
ibdev = mlx5_ib_rep_to_dev(rep);
__mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
}
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
const struct mlx5_ib_profile *profile;
struct mlx5_ib_dev *ibdev;
if (rep->vport == MLX5_VPORT_UPLINK)
profile = &uplink_rep_profile;
else
profile = &vf_rep_profile;
ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
if (!ibdev)
return -ENOMEM;
@ -78,7 +65,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
ibdev->mdev = dev;
ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
MLX5_CAP_GEN(dev, num_vhca_ports));
if (!__mlx5_ib_add(ibdev, &rep_profile))
if (!__mlx5_ib_add(ibdev, profile))
return -EINVAL;
rep->rep_if[REP_IB].priv = ibdev;
@ -105,15 +92,14 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
return mlx5_ib_rep_to_dev(rep);
}
static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vports = MLX5_TOTAL_VPORTS(mdev);
struct mlx5_eswitch_rep_if rep_if = {};
int vport;
for (vport = 1; vport < total_vfs; vport++) {
struct mlx5_eswitch_rep_if rep_if = {};
for (vport = 0; vport < total_vports; vport++) {
rep_if.load = mlx5_ib_vport_rep_load;
rep_if.unload = mlx5_ib_vport_rep_unload;
rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
@ -121,39 +107,16 @@ static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
}
}
static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vports = MLX5_TOTAL_VPORTS(mdev);
int vport;
for (vport = 1; vport < total_vfs; vport++)
for (vport = total_vports - 1; vport >= 0; vport--)
mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
}
void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
struct mlx5_eswitch_rep_if rep_if = {};
rep_if.load = mlx5_ib_nic_rep_load;
rep_if.unload = mlx5_ib_nic_rep_unload;
rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
rep_if.priv = dev;
mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
mlx5_ib_rep_register_vf_vports(dev);
}
void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
{
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
}
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
return mlx5_eswitch_mode(esw);


@ -10,14 +10,16 @@
#include "mlx5_ib.h"
#ifdef CONFIG_MLX5_ESWITCH
extern const struct mlx5_ib_profile uplink_rep_profile;
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
int vport_index);
struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw);
struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
int vport_index);
void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev);
void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev);
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev);
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev);
int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq);
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
@ -48,8 +50,8 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
return NULL;
}
static inline void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {}
static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {}
static inline int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{


@ -36,6 +36,7 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
#include "cmd.h"
enum {
MLX5_IB_VENDOR_CLASS1 = 0x9,
@ -51,9 +52,10 @@ static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
return dev->mdev->port_caps[port_num - 1].has_smi;
}
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad)
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
int ignore_bkey, u8 port, const struct ib_wc *in_wc,
const struct ib_grh *in_grh, const void *in_mad,
void *response_mad)
{
u8 op_modifier = 0;
@ -68,7 +70,8 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
if (ignore_bkey || !in_wc)
op_modifier |= 0x2;
return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier,
port);
}
static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,


@ -331,7 +331,7 @@ out:
spin_unlock(&port->mp.mpi_lock);
}
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
{
switch (eth_proto_oper) {
@ -389,10 +389,66 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
return 0;
}
static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
{
switch (eth_proto_oper) {
case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_SDR;
break;
case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_DDR;
break;
case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_QDR;
break;
case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_QDR;
break;
case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_EDR;
break;
case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_HDR;
break;
case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_HDR;
break;
case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_HDR;
break;
default:
return -EINVAL;
}
return 0;
}
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width, bool ext)
{
return ext ?
translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
active_width) :
translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
active_width);
}
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
struct mlx5_core_dev *mdev;
struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
@ -400,6 +456,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
u16 qkey_viol_cntr;
u32 eth_prot_oper;
u8 mdev_port_num;
bool ext;
int err;
mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
@ -416,16 +473,18 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
*/
err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
mdev_port_num);
if (err)
goto out;
ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_QDR;
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
&props->active_width, ext);
props->port_cap_flags |= IB_PORT_CM_SUP;
props->ip_gids = true;
@ -6386,7 +6445,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_delay_drop_cleanup),
};
static const struct mlx5_ib_profile nic_rep_profile = {
const struct mlx5_ib_profile uplink_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
@ -6479,6 +6538,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
printk_once(KERN_INFO "%s", mlx5_version);
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
mlx5_ib_register_vport_reps(mdev);
return mdev;
}
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@ -6493,14 +6558,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
dev->profile = &nic_rep_profile;
mlx5_ib_register_vport_reps(dev);
return dev;
}
return __mlx5_ib_add(dev, &pf_profile);
}
@ -6509,6 +6566,11 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_multiport_info *mpi;
struct mlx5_ib_dev *dev;
if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
mlx5_ib_unregister_vport_reps(mdev);
return;
}
if (mlx5_core_is_mp_slave(mdev)) {
mpi = context;
mutex_lock(&mlx5_ib_multiport_mutex);
@ -6520,9 +6582,6 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
}
dev = context;
if (dev->profile == &nic_rep_profile)
mlx5_ib_unregister_vport_reps(dev);
else
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
ib_dealloc_device((struct ib_device *)dev);
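
mlx5_query_port_roce() above reads the operational protocol through
MLX5_GET_ETH_PROTO(), which picks the extended or legacy PTYS field based
on the ptys_extended_ethernet capability. A sketch of that helper as this
series defines it in include/linux/mlx5/port.h (reproduced from memory,
treat the exact form as an assumption):

/* Read ext_<field> from the PTYS output when the extended layout is in
 * use, otherwise read the legacy <field>.
 */
#define MLX5_GET_ETH_PROTO(reg, out, ext, field)	\
	(ext ? MLX5_GET(reg, out, ext_##field) :	\
	 MLX5_GET(reg, out, field))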


@ -587,6 +587,7 @@ struct mlx5_ib_mr {
struct mlx5_ib_mr *parent;
atomic_t num_leaf_free;
wait_queue_head_t q_leaf_free;
struct mlx5_async_work cb_work;
};
struct mlx5_ib_mw {
@ -944,6 +945,7 @@ struct mlx5_ib_dev {
struct mlx5_memic memic;
u16 devx_whitelist_uid;
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@ -1038,9 +1040,6 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
u32 flags, struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);


@ -123,9 +123,10 @@ static void update_odp_mr(struct mlx5_ib_mr *mr)
}
#endif
static void reg_mr_callback(int status, void *context)
static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
struct mlx5_ib_mr *mr = context;
struct mlx5_ib_mr *mr =
container_of(context, struct mlx5_ib_mr, cb_work);
struct mlx5_ib_dev *dev = mr->dev;
struct mlx5_mr_cache *cache = &dev->cache;
int c = order2idx(dev, mr->order);
@ -216,9 +217,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
ent->pending++;
spin_unlock_irq(&ent->lock);
err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
in, inlen,
&dev->async_ctx, in, inlen,
mr->out, sizeof(mr->out),
reg_mr_callback, mr);
reg_mr_callback, &mr->cb_work);
if (err) {
spin_lock_irq(&ent->lock);
ent->pending--;
@ -679,6 +680,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return -ENOMEM;
}
mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
timer_setup(&dev->delay_timer, delay_time_func, 0);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
@ -725,33 +727,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return 0;
}
static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
int total = 0;
int i;
int j;
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
for (j = 0 ; j < 1000; j++) {
if (!ent->pending)
break;
msleep(50);
}
}
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
total += ent->pending;
}
if (total)
mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
else
mlx5_ib_warn(dev, "done with all pending requests\n");
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
int i;
@ -763,12 +738,12 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
flush_workqueue(dev->cache.wq);
mlx5_mr_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
wait_for_async_commands(dev);
del_timer_sync(&dev->delay_timer);
return 0;


@ -13,7 +13,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o
@ -35,7 +35,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o
#
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o


@ -316,6 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_DEALLOC_MEMIC:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_QUERY_HOST_PARAMS:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@ -627,6 +628,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS);
default: return "unknown command opcode";
}
}
@ -1711,12 +1713,57 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
}
EXPORT_SYMBOL(mlx5_cmd_exec);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size, mlx5_cmd_cbk_t callback,
void *context)
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
struct mlx5_async_ctx *ctx)
{
return cmd_exec(dev, in, in_size, out, out_size, callback, context,
false);
ctx->dev = dev;
/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
atomic_set(&ctx->num_inflight, 1);
init_waitqueue_head(&ctx->wait);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
/**
* mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
* @ctx: The ctx to clean
*
* Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
* caller must ensure that mlx5_cmd_exec_cb() is not called during or after
* the call to mlx5_cmd_cleanup_async_ctx().
*/
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
atomic_dec(&ctx->num_inflight);
wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx = work->ctx;
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
wake_up(&ctx->wait);
}
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work)
{
int ret;
work->ctx = ctx;
work->user_callback = callback;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
if (ret && atomic_dec_and_test(&ctx->num_inflight))
wake_up(&ctx->wait);
return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
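
Callers of the new API embed a struct mlx5_async_work in the object that
owns the command and recover it with container_of() in the callback; the
mr.c conversion earlier in this diff follows exactly this pattern. A
minimal usage sketch with hypothetical names (my_cmd and my_done are
illustrative, not part of this commit):

struct my_cmd {
	u32 out[16];			/* command output buffer */
	struct mlx5_async_work work;	/* embedded completion work */
};

static void my_done(int status, struct mlx5_async_work *work)
{
	/* Recover the owning object from the embedded work item. */
	struct my_cmd *cmd = container_of(work, struct my_cmd, work);

	/* ... check status, consume cmd->out ... */
}

/* Lifetime: one init, any number of submissions, one cleanup.
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	mlx5_cmd_exec_cb(&ctx, in, sizeof(in), cmd->out, sizeof(cmd->out),
 *			 my_done, &cmd->work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);  (returns only once all
 *					    outstanding callbacks have run)
 */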


@ -0,0 +1,112 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#include "ecpf.h"
bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
}
static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, 0);
MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, 0);
MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
{
int err;
err = mlx5_peer_pf_enable_hca(dev);
if (err)
mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
err);
return err;
}
static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
{
int err;
err = mlx5_peer_pf_disable_hca(dev);
if (err) {
mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
err);
return;
}
err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n",
err);
}
int mlx5_ec_init(struct mlx5_core_dev *dev)
{
int err = 0;
if (!mlx5_core_is_ecpf(dev))
return 0;
/* ECPF shall enable HCA for peer PF in the same way a PF
* does this for its VFs.
*/
err = mlx5_peer_pf_init(dev);
if (err)
return err;
return 0;
}
void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
{
if (!mlx5_core_is_ecpf(dev))
return;
mlx5_peer_pf_cleanup(dev);
}
static int mlx5_query_host_params_context(struct mlx5_core_dev *dev,
u32 *out, int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
MLX5_SET(query_host_params_in, in, opcode,
MLX5_CMD_OP_QUERY_HOST_PARAMS);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
{
u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
int err;
err = mlx5_query_host_params_context(dev, out, sizeof(out));
if (err)
return err;
*num_vf = MLX5_GET(query_host_params_out, out,
host_params_context.host_num_of_vfs);
mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf);
return 0;
}
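
A hedged usage sketch of the host-params query (the caller and log text
are illustrative only, not from this diff):

int num_vf;

if (!mlx5_query_host_params_num_vfs(dev, &num_vf))
	mlx5_core_dbg(dev, "host side exposes %d VFs\n", num_vf);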


@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#ifndef __MLX5_ECPF_H__
#define __MLX5_ECPF_H__
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_ESWITCH
enum {
MLX5_ECPU_BIT_NUM = 23,
};
bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
int mlx5_ec_init(struct mlx5_core_dev *dev);
void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf);
#else /* CONFIG_MLX5_ESWITCH */
static inline bool
mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; }
static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {}
static inline int
mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ECPF_H__ */


@ -63,66 +63,168 @@ static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
[MLX5E_50GBASE_KR2] = 50000,
};
u32 mlx5e_port_ptys2speed(u32 eth_proto_oper)
static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
[MLX5E_SGMII_100M] = 100,
[MLX5E_1000BASE_X_SGMII] = 1000,
[MLX5E_5GBASE_R] = 5000,
[MLX5E_10GBASE_XFI_XAUI_1] = 10000,
[MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
[MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
[MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
[MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
[MLX5E_400GAUI_8] = 400000,
};
static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
const u32 **arr, u32 *size)
{
bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
*size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
ARRAY_SIZE(mlx5e_link_speed);
*arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
}
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
struct mlx5e_port_eth_proto *eproto)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
if (!eproto)
return -EINVAL;
if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
return -EOPNOTSUPP;
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
if (err)
return err;
eproto->cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_capability);
eproto->admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_admin);
eproto->oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
return 0;
}
void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
*an_status = 0;
*an_disable_cap = 0;
*an_disable_admin = 0;
if (mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1))
return;
*an_status = MLX5_GET(ptys_reg, out, an_status);
*an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
*an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
}
int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, bool ext)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u8 an_disable_admin;
u8 an_disable_cap;
u8 an_status;
mlx5_port_query_eth_autoneg(dev, &an_status, &an_disable_cap,
&an_disable_admin);
if (!an_disable_cap && an_disable)
return -EPERM;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
MLX5_SET(ptys_reg, in, proto_mask, MLX5_PTYS_EN);
if (ext)
MLX5_SET(ptys_reg, in, ext_eth_proto_admin, proto_admin);
else
MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
{
unsigned long temp = eth_proto_oper;
const u32 *table;
u32 speed = 0;
u32 max_size;
int i;
i = find_first_bit(&temp, MLX5E_LINK_MODES_NUMBER);
if (i < MLX5E_LINK_MODES_NUMBER)
speed = mlx5e_link_speed[i];
mlx5e_port_get_speed_arr(mdev, &table, &max_size);
i = find_first_bit(&temp, max_size);
if (i < max_size)
speed = table[i];
return speed;
}
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
u32 eth_proto_oper;
struct mlx5e_port_eth_proto eproto;
bool ext;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
return err;
goto out;
eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
*speed = mlx5e_port_ptys2speed(eth_proto_oper);
*speed = mlx5e_port_ptys2speed(mdev, eproto.oper);
if (!(*speed))
err = -EINVAL;
out:
return err;
}
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5e_port_eth_proto eproto;
u32 max_speed = 0;
u32 proto_cap;
const u32 *table;
u32 max_size;
bool ext;
int err;
int i;
err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
return err;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
if (proto_cap & MLX5E_PROT_MASK(i))
max_speed = max(max_speed, mlx5e_link_speed[i]);
mlx5e_port_get_speed_arr(mdev, &table, &max_size);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
max_speed = max(max_speed, table[i]);
*speed = max_speed;
return 0;
}
u32 mlx5e_port_speed2linkmodes(u32 speed)
u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed)
{
u32 link_modes = 0;
const u32 *table;
u32 max_size;
int i;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
if (mlx5e_link_speed[i] == speed)
mlx5e_port_get_speed_arr(mdev, &table, &max_size);
for (i = 0; i < max_size; ++i) {
if (table[i] == speed)
link_modes |= MLX5E_PROT_MASK(i);
}
return link_modes;
}
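
Taken together, these helpers replace the old mlx5_query_port_proto_*
pair. A sketch of how a caller could force a 50Gbps link using only the
signatures above (illustrative; the ethtool path further below does the
same dance with user-supplied settings):

static int force_50g(struct mlx5_core_dev *mdev)
{
	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
	struct mlx5e_port_eth_proto eproto;
	u32 link_modes;
	int err;

	err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
	if (err)
		return err;

	/* All link modes this device advertises at 50000 Mbps. */
	link_modes = mlx5e_port_speed2linkmodes(mdev, 50000) & eproto.cap;
	if (!link_modes)
		return -EOPNOTSUPP;

	/* Keep autoneg enabled (an_disable = false). */
	return mlx5_port_set_eth_ptys(mdev, false, link_modes, ext);
}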


@ -36,10 +36,22 @@
#include <linux/mlx5/driver.h>
#include "en.h"
u32 mlx5e_port_ptys2speed(u32 eth_proto_oper);
struct mlx5e_port_eth_proto {
u32 cap;
u32 admin;
u32 oper;
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
struct mlx5e_port_eth_proto *eproto);
void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, bool ext);
u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper);
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
u32 mlx5e_port_speed2linkmodes(u32 speed);
u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);


@ -695,13 +695,14 @@ static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
speed = mlx5e_port_ptys2speed(eth_proto_oper);
speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper);
if (!speed) {
speed = SPEED_UNKNOWN;
goto out;
@ -885,7 +886,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 eth_proto_cap, eth_proto_admin;
struct mlx5e_port_eth_proto eproto;
bool an_changes = false;
u8 an_disable_admin;
u8 an_disable_cap;
@ -899,16 +900,16 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
mlx5e_port_speed2linkmodes(speed);
mlx5e_port_speed2linkmodes(mdev, speed);
err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto cap failed: %d\n",
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
goto out;
}
link_modes = link_modes & eth_proto_cap;
link_modes = link_modes & eproto.cap;
if (!link_modes) {
netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
__func__);
@ -916,24 +917,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto admin failed: %d\n",
__func__, err);
goto out;
}
mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
&an_disable_cap, &an_disable_admin);
mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
&an_disable_admin);
an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
an_changes = ((!an_disable && an_disable_admin) ||
(an_disable && !an_disable_admin));
if (!an_changes && link_modes == eth_proto_admin)
if (!an_changes && link_modes == eproto.admin)
goto out;
mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, false);
mlx5_toggle_port_link(mdev);
out:


@ -153,7 +153,7 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_uplink_rep_update_hw_counters(priv);
else
mlx5e_vf_rep_update_hw_counters(priv);
@ -1119,7 +1119,7 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
if (ret)
return ret;
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
ret = snprintf(buf, len, "p%d", pf_num);
else
ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
@ -1206,7 +1206,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
return false;
rep = rpriv->rep;
return (rep->vport == FDB_UPLINK_VPORT);
return (rep->vport == MLX5_VPORT_UPLINK);
}
static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
@ -1344,7 +1344,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->sw_mtu = netdev->mtu;
/* SQ */
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
else
params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
@ -1371,7 +1371,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_core_dev *mdev = priv->mdev;
if (rep->vport == FDB_UPLINK_VPORT) {
if (rep->vport == MLX5_VPORT_UPLINK) {
SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
@ -1401,7 +1401,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_TSO6;
netdev->hw_features |= NETIF_F_RXCSUM;
if (rep->vport != FDB_UPLINK_VPORT)
if (rep->vport != MLX5_VPORT_UPLINK)
netdev->features |= NETIF_F_VLAN_CHALLENGED;
netdev->features |= netdev->hw_features;
@ -1554,7 +1554,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return err;
}
if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
uplink_priv = &rpriv->uplink_priv;
/* init shared tc flow table */
@ -1590,7 +1590,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
for (tc = 0; tc < priv->profile->max_tc; tc++)
mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
/* clean indirect TC block notifications */
unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
mlx5e_rep_indr_clean_block_privs(rpriv);
@ -1709,7 +1709,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
rpriv->rep = rep;
nch = mlx5e_get_max_num_channels(dev);
profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
profile = (rep->vport == MLX5_VPORT_UPLINK) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
@ -1722,7 +1722,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
rep->rep_if[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
if (rep->vport == FDB_UPLINK_VPORT) {
if (rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_create_mdev_resources(dev);
if (err)
goto err_destroy_netdev;
@ -1758,7 +1758,7 @@ err_detach_netdev:
mlx5e_detach_netdev(netdev_priv(netdev));
err_destroy_mdev_resources:
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(dev);
err_destroy_netdev:
@ -1778,7 +1778,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
unregister_netdev(netdev);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
if (rep->vport == FDB_UPLINK_VPORT)
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(priv->mdev);
mlx5e_destroy_netdev(priv);
kfree(ppriv); /* mlx5e_rep_priv */


@ -1776,7 +1776,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
if (rep->vport != FDB_UPLINK_VPORT &&
if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
esw->offloads.inline_mode < match_level)) {
NL_SET_ERR_MSG_MOD(extack,
@ -2693,7 +2693,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT &&
bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
flow->flags & MLX5E_TC_FLOW_INGRESS;
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
@ -2816,7 +2816,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
* original flow and packets redirected from uplink use the
* peer mdev.
*/
if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT)
if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;


@ -34,6 +34,7 @@
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
@ -114,11 +115,11 @@ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *cq = NULL;
spin_lock(&table->lock);
rcu_read_lock();
cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq))
mlx5_cq_hold(cq);
spin_unlock(&table->lock);
rcu_read_unlock();
return cq;
}
@ -371,9 +372,9 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
struct mlx5_cq_table *table = &eq->cq_table;
int err;
spin_lock_irq(&table->lock);
spin_lock(&table->lock);
err = radix_tree_insert(&table->tree, cq->cqn, cq);
spin_unlock_irq(&table->lock);
spin_unlock(&table->lock);
return err;
}
@ -383,9 +384,9 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *tmp;
spin_lock_irq(&table->lock);
spin_lock(&table->lock);
tmp = radix_tree_delete(&table->tree, cq->cqn);
spin_unlock_irq(&table->lock);
spin_unlock(&table->lock);
if (!tmp) {
mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
@ -530,6 +531,9 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
if (mlx5_core_is_ecpf_esw_manager(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE);
return async_event_mask;
}


@ -40,8 +40,6 @@
#include "eswitch.h"
#include "fs_core.h"
#define UPLINK_VPORT 0xFFFF
enum {
MLX5_ACTION_NONE = 0,
MLX5_ACTION_ADD = 1,
@ -52,7 +50,7 @@ enum {
struct vport_addr {
struct l2addr_node node;
u8 action;
u32 vport;
u16 vport;
struct mlx5_flow_handle *flow_rule;
bool mpfs; /* UC MAC was added to MPFs */
/* A flag indicating that mac was added due to mc promiscuous vport */
@ -115,7 +113,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
@ -152,7 +150,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
/* E-Switch FDB */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
int match_header = (is_zero_ether_addr(mac_c) ? 0 :
@ -188,7 +186,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
misc_parameters);
mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
}
@ -215,7 +213,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
u8 mac_c[ETH_ALEN];
@ -224,7 +222,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
u8 mac_c[ETH_ALEN];
u8 mac_v[ETH_ALEN];
@ -237,7 +235,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
u8 mac_c[ETH_ALEN];
u8 mac_v[ETH_ALEN];
@ -377,7 +375,7 @@ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
u16 vport = vaddr->vport;
int err;
/* Skip mlx5_mpfs_add_mac for PFs,
@ -409,7 +407,7 @@ fdb_add:
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
u16 vport = vaddr->vport;
int err = 0;
/* Skip mlx5_mpfs_del_mac for PFs,
@ -438,7 +436,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
struct esw_mc_addr *esw_mc)
{
u8 *mac = vaddr->node.addr;
u32 vport_idx = 0;
u16 vport_idx = 0;
for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
struct mlx5_vport *vport = &esw->vports[vport_idx];
@ -485,7 +483,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
struct hlist_head *hash = esw->mc_table;
struct esw_mc_addr *esw_mc;
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
u16 vport = vaddr->vport;
if (!esw->fdb_table.legacy.fdb)
return 0;
@ -499,7 +497,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
return -ENOMEM;
esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
/* Add this multicast mac to all the mc promiscuous vports */
update_allmulti_vports(esw, vaddr, esw_mc);
@ -525,7 +523,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
struct hlist_head *hash = esw->mc_table;
struct esw_mc_addr *esw_mc;
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
u16 vport = vaddr->vport;
if (!esw->fdb_table.legacy.fdb)
return 0;
@ -564,7 +562,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
u32 vport_num, int list_type)
u16 vport_num, int list_type)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
@ -599,7 +597,7 @@ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
u32 vport_num, int list_type)
u16 vport_num, int list_type)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
@ -686,7 +684,7 @@ out:
/* Sync vport UC/MC list from vport context
* Must be called after esw_update_vport_addr_list
*/
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
struct l2addr_node *node;
@ -721,7 +719,7 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
}
/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
bool promisc, bool mc_promisc)
{
struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
@ -736,7 +734,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
if (!allmulti_addr->uplink_rule)
allmulti_addr->uplink_rule =
esw_fdb_set_vport_allmulti_rule(esw,
UPLINK_VPORT);
MLX5_VPORT_UPLINK);
allmulti_addr->refcnt++;
} else if (vport->allmulti_rule) {
mlx5_del_flow_rules(vport->allmulti_rule);
@ -764,7 +762,7 @@ promisc:
}
/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
int promisc_all = 0;
@ -1633,7 +1631,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
err = esw_offloads_init(esw, nvfs + 1);
err = esw_offloads_init(esw, nvfs + MLX5_SPECIAL_VPORTS);
}
if (err)


@ -49,8 +49,6 @@
#define MLX5_MAX_MC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
#define FDB_UPLINK_VPORT 0xffff
#define MLX5_MIN_BW_SHARE 1
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \


@ -46,6 +46,11 @@ enum {
FDB_SLOW_PATH
};
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
#define MLX5_ESW_MISS_FLOWS (2)
#define fdb_prio_table(esw, chain, prio, level) \
(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
@ -360,15 +365,15 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
in_rep = attr->in_rep;
out_rep = attr->dests[0].rep;
if (push && in_rep->vport == FDB_UPLINK_VPORT)
if (push && in_rep->vport == MLX5_VPORT_UPLINK)
goto out_notsupp;
if (pop && out_rep->vport == FDB_UPLINK_VPORT)
if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
goto out_notsupp;
/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
if (!push && !pop && fwd)
if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
goto out_notsupp;
/* protects against (1) setting rules with different vlans to push and
@ -410,7 +415,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
attr->vlan_handled = true;
}
@ -470,7 +475,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
vport->vlan_refcount--;
return 0;
@ -905,8 +910,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
esw->fdb_table.offloads.fdb_left[i] =
ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
esw->total_vports;
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
MLX5_ESW_MISS_FLOWS + esw->total_vports;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@ -1000,7 +1005,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
dmac[0] = 0x01;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
ix + MLX5_ESW_MISS_FLOWS);
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
@ -1049,7 +1055,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_destroy_offloads_fast_fdb_tables(esw);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
@ -1063,7 +1069,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;
ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft_offloads)) {
@ -1083,16 +1089,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
struct mlx5_priv *priv = &esw->dev->priv;
u32 *flow_group_in;
void *match_criteria, *misc;
int err = 0;
int nvports = priv->sriov.num_vfs + 2;
nvports = nvports + MLX5_ESW_MISS_FLOWS;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@ -1228,7 +1233,7 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
ether_addr_copy(rep->hw_id, hw_id);
}
offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
offloads->vport_reps[0].vport = MLX5_VPORT_UPLINK;
return 0;
}
@ -1408,11 +1413,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
return err;
err = esw_create_offloads_table(esw);
err = esw_create_offloads_table(esw, nvports);
if (err)
goto create_ft_err;
err = esw_create_vport_rx_group(esw);
err = esw_create_vport_rx_group(esw, nvports);
if (err)
goto create_fg_err;
@ -1812,7 +1817,7 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
if (vport == FDB_UPLINK_VPORT)
if (vport == MLX5_VPORT_UPLINK)
vport = UPLINK_REP_INDEX;
rep = &offloads->vport_reps[vport];


@ -103,6 +103,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_STALL_EVENT";
case MLX5_EVENT_TYPE_CMD:
return "MLX5_EVENT_TYPE_CMD";
case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE:
return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:


@ -32,6 +32,7 @@
#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"


@ -1,75 +0,0 @@
/*
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
int err = -ENOMEM;
void *data;
void *resp;
u32 *out;
u32 *in;
in = kzalloc(inlen, GFP_KERNEL);
out = kzalloc(outlen, GFP_KERNEL);
if (!in || !out)
goto out;
MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
MLX5_SET(mad_ifc_in, in, op_mod, opmod);
MLX5_SET(mad_ifc_in, in, port, port);
data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
goto out;
resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
memcpy(outb, resp,
MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
out:
kfree(out);
kfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc);


@ -65,6 +65,7 @@
#include "lib/vxlan.h"
#include "lib/devcom.h"
#include "diag/fw_tracer.h"
#include "ecpf.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@ -459,6 +460,50 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
return err;
}
static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
{
void *set_hca_cap;
void *set_ctx;
int set_sz;
int err;
if (!MLX5_CAP_GEN(dev, pg))
return 0;
err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
if (err)
return err;
if (!(MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive) ||
MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive) ||
MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive)))
return 0;
set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
return -ENOMEM;
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
MLX5_ST_SZ_BYTES(odp_cap));
/* set ODP SRQ support for RC/UD and XRC transports */
MLX5_SET(odp_cap, set_hca_cap, ud_odp_caps.srq_receive,
MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive));
MLX5_SET(odp_cap, set_hca_cap, rc_odp_caps.srq_receive,
MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive));
MLX5_SET(odp_cap, set_hca_cap, xrc_odp_caps.srq_receive,
MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive));
err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ODP);
kfree(set_ctx);
return err;
}
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
void *set_ctx = NULL;
@ -567,6 +612,8 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
@ -577,6 +624,8 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@ -693,6 +742,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_clr_master;
}
if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
mlx5_core_dbg(dev, "Enabling pci atomics failed\n");
dev->iseg_base = pci_resource_start(dev->pdev, 0);
dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
if (!dev->iseg) {
@ -849,6 +903,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
struct pci_dev *pdev = dev->pdev;
int err;
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
@ -926,6 +981,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
err = handle_hca_cap_odp(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
goto reclaim_boot_pages;
}
err = mlx5_satisfy_startup_pages(dev, 0);
if (err) {
dev_err(&pdev->dev, "failed to allocate init pages\n");
@ -1014,6 +1075,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_sriov;
}
err = mlx5_ec_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init embedded CPU\n");
goto err_ec;
}
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@ -1031,6 +1098,9 @@ out:
return 0;
err_reg_dev:
mlx5_ec_cleanup(dev);
err_ec:
mlx5_sriov_detach(dev);
err_sriov:
@ -1105,6 +1175,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
mlx5_ec_cleanup(dev);
mlx5_sriov_detach(dev);
mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev);


@ -121,7 +121,7 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 modify_bitmask);
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts);
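For reference, a minimal caller-side sketch of the generalized wait helper. The VF call below appears verbatim in the sriov.c hunk further down; the peer-PF variant is an assumption about the new ECPF teardown path, based on the peer_pf_pages counter this series adds:
	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
	if (mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages))
		mlx5_core_warn(dev, "timeout reclaiming peer PF pages\n");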


@ -51,9 +51,10 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen,
u32 *out, int outlen,
mlx5_cmd_cbk_t callback, void *context)
struct mlx5_async_ctx *async_ctx, u32 *in,
int inlen, u32 *out, int outlen,
mlx5_async_cbk_t callback,
struct mlx5_async_work *context)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
@ -71,7 +72,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
MLX5_SET(mkc, mkc, mkey_7_0, key);
if (callback)
return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
callback, context);
err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
@ -105,7 +106,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen)
{
return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
return mlx5_core_create_mkey_cb(dev, mkey, NULL, in, inlen,
NULL, 0, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
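A hypothetical caller sketch for the reworked callback path: per-command state now embeds struct mlx5_async_work, and the completion handler recovers it with container_of() instead of receiving an opaque void *context. All my_* names below are illustrative:
#include <linux/mlx5/driver.h>
struct my_mkey_req {
	struct mlx5_async_work cb_work;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
};
static void my_mkey_done(int status, struct mlx5_async_work *context)
{
	struct my_mkey_req *req =
		container_of(context, struct my_mkey_req, cb_work);
	/* inspect status and req->out here, then release req */
}
static int my_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_async_ctx *ctx,
			  struct mlx5_core_mkey *mkey, u32 *in, int inlen,
			  struct my_mkey_req *req)
{
	/* ctx is set up once beforehand with mlx5_cmd_init_async_ctx() */
	return mlx5_core_create_mkey_cb(dev, mkey, ctx, in, inlen,
					req->out, sizeof(req->out),
					my_mkey_done, &req->cb_work);
}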


@ -48,6 +48,7 @@ enum {
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u16 func_id;
u8 ec_function;
s32 npages;
struct work_struct work;
};
@ -143,6 +144,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
MLX5_SET(query_pages_in, in, op_mod, boot ?
MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@ -253,7 +255,8 @@ err_mapping:
return err;
}
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
@ -262,6 +265,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@ -270,7 +274,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
int notify_fail, bool ec_function)
{
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
@ -305,6 +309,7 @@ retry:
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err) {
@ -316,8 +321,11 @@ retry:
dev->priv.fw_pages += npages;
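	/* Charge the pages to the right owner: a nonzero func_id means a
	 * VF, while func_id 0 with ec_function clear on an ECPF means the
	 * external host PF, tracked by the new peer_pf_pages counter.
	 */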
if (func_id)
dev->priv.vfs_pages += npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.peer_pf_pages += npages;
mlx5_core_dbg(dev, "err %d\n", err);
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
kvfree(in);
return 0;
@ -328,7 +336,7 @@ out_4k:
out_free:
kvfree(in);
if (notify_fail)
page_notify_fail(dev, func_id);
page_notify_fail(dev, func_id, ec_function);
return err;
}
@ -364,7 +372,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
}
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
int *nclaimed, bool ec_function)
{
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
@ -385,6 +393,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
@ -410,6 +419,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.peer_pf_pages -= num_claimed;
out_free:
kvfree(out);
@ -423,9 +434,10 @@ static void pages_work_handler(struct work_struct *work)
int err = 0;
if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
req->ec_function);
else if (req->npages > 0)
err = give_pages(dev, req->func_id, req->npages, 1);
err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
if (err)
mlx5_core_warn(dev, "%s fail %d\n",
@ -434,6 +446,10 @@ static void pages_work_handler(struct work_struct *work)
kfree(req);
}
enum {
EC_FUNCTION_MASK = 0x8000,
};
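/* The EQE's 16-bit ec_function word carries the EC-function flag in its
 * top bit; req_pages_handler() below masks it out with EC_FUNCTION_MASK.
 */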
static int req_pages_handler(struct notifier_block *nb,
unsigned long type, void *data)
{
@ -441,6 +457,7 @@ static int req_pages_handler(struct notifier_block *nb,
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
bool ec_function;
u16 func_id;
s32 npages;
@ -450,6 +467,7 @@ static int req_pages_handler(struct notifier_block *nb,
func_id = be16_to_cpu(eqe->data.req_pages.func_id);
npages = be32_to_cpu(eqe->data.req_pages.num_pages);
ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
func_id, npages);
req = kzalloc(sizeof(*req), GFP_ATOMIC);
@ -461,6 +479,7 @@ static int req_pages_handler(struct notifier_block *nb,
req->dev = dev;
req->func_id = func_id;
req->npages = npages;
req->ec_function = ec_function;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
return NOTIFY_OK;
@ -479,7 +498,7 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
npages, boot ? "boot" : "init", func_id);
return give_pages(dev, func_id, npages, 0);
return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}
enum {
@ -513,7 +532,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
fwp = rb_entry(p, struct fw_page, rb_node);
err = reclaim_pages(dev, fwp->func_id,
optimal_reclaimed_pages(),
&nclaimed);
&nclaimed, mlx5_core_is_ecpf(dev));
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
@ -535,6 +554,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.vfs_pages,
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages);
WARN(dev->priv.peer_pf_pages,
"Peer PF FW pages counter is %d after reclaiming all pages\n",
dev->priv.peer_pf_pages);
return 0;
}
@ -567,10 +589,10 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
flush_workqueue(dev->priv.pg_wq);
}
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
int prev_vfs_pages = dev->priv.vfs_pages;
int prev_pages = *pages;
/* In case of internal error we will free the pages manually later */
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@ -578,16 +600,16 @@ int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
return 0;
}
mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages,
dev->priv.name);
while (dev->priv.vfs_pages) {
while (*pages) {
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
return -ETIMEDOUT;
}
if (dev->priv.vfs_pages < prev_vfs_pages) {
if (*pages < prev_pages) {
end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
prev_vfs_pages = dev->priv.vfs_pages;
prev_pages = *pages;
}
msleep(50);
}


@ -30,10 +30,7 @@
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
@ -157,44 +154,6 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
sizeof(out), MLX5_REG_MLCR, 0, 1);
}
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
if (err)
return err;
if (proto_mask == MLX5_PTYS_EN)
*proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
else
*proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
if (err)
return err;
if (proto_mask == MLX5_PTYS_EN)
*proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
else
*proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port)
{
@ -211,23 +170,6 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
u32 *proto_oper, u8 local_port)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN,
local_port);
if (err)
return err;
*proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
return 0;
}
EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper);
int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, u8 local_port)
{
@ -245,35 +187,6 @@ int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, int proto_mask)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u8 an_disable_admin;
u8 an_disable_cap;
u8 an_status;
mlx5_query_port_autoneg(dev, proto_mask, &an_status,
&an_disable_cap, &an_disable_admin);
if (!an_disable_cap && an_disable)
return -EPERM;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
if (proto_mask == MLX5_PTYS_EN)
MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
else
MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
/* This function should be used after setting a port register only */
void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
{
@ -606,25 +519,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
*an_status = 0;
*an_disable_cap = 0;
*an_disable_admin = 0;
if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
return;
*an_status = MLX5_GET(ptys_reg, out, an_status);
*an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
*an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;


@ -147,7 +147,7 @@ out:
if (MLX5_ESWITCH_MANAGER(dev))
mlx5_eswitch_disable_sriov(dev->priv.eswitch);
if (mlx5_wait_for_vf_pages(dev))
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}


@ -255,7 +255,7 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u32 vport,
u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size)
@ -373,7 +373,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
u32 vport,
u16 vport,
u16 vlans[],
int *size)
{
@ -526,7 +526,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u32 vport, u64 node_guid)
u16 vport, u64 node_guid)
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *nic_vport_context;
@ -827,7 +827,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
u32 vport,
u16 vport,
int *promisc_uc,
int *promisc_mc,
int *promisc_all)


@ -67,7 +67,7 @@
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
@ -342,6 +342,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe,
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
@ -591,7 +593,7 @@ struct mlx5_eqe_cmd {
};
struct mlx5_eqe_page_req {
u8 rsvd0[2];
__be16 ec_function;
__be16 func_id;
__be32 num_pages;
__be32 rsvd1[5];
@ -1201,6 +1203,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
#define MLX5_CAP_ODP_MAX(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)
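/* e.g. MLX5_CAP_ODP(mdev, ud_odp_caps.srq_receive) reads the currently
 * enabled value, while the new MLX5_CAP_ODP_MAX() reads the firmware
 * maximum that handle_hca_cap_odp() mirrors into the current caps.
 */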
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
MLX5_GET(vector_calc_cap, \
mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)


@ -522,6 +522,7 @@ struct mlx5_priv {
atomic_t reg_pages;
struct list_head free_list;
int vfs_pages;
int peer_pf_pages;
struct mlx5_core_health health;
@ -652,6 +653,7 @@ struct mlx5_core_dev {
u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
} caps;
u64 sys_image_guid;
phys_addr_t iseg_base;
@ -850,11 +852,30 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
struct mlx5_async_ctx {
struct mlx5_core_dev *dev;
atomic_t num_inflight;
struct wait_queue_head wait;
};
struct mlx5_async_work;
typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
struct mlx5_async_work {
struct mlx5_async_ctx *ctx;
mlx5_async_cbk_t user_callback;
};
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size, mlx5_cmd_cbk_t callback,
void *context);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
@ -885,9 +906,10 @@ void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen,
u32 *out, int outlen,
mlx5_cmd_cbk_t callback, void *context);
struct mlx5_async_ctx *async_ctx, u32 *in,
int inlen, u32 *out, int outlen,
mlx5_async_cbk_t callback,
struct mlx5_async_work *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen);
@ -897,14 +919,12 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
@ -1058,11 +1078,24 @@ static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
#define MLX5_VPORT_MANAGER(mdev) \
(MLX5_CAP_GEN(mdev, vport_group_manager) && \
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
mlx5_core_is_pf(mdev))
static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
}
static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}
#define MLX5_HOST_PF_MAX_VFS (127u)
static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev)
{
if (mlx5_core_is_ecpf_esw_manager(dev))
return MLX5_HOST_PF_MAX_VFS;
else
return pci_sriov_get_totalvfs(dev->pdev);
}
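A small usage sketch (hypothetical helper) showing why the distinction matters: on an ECPF eswitch manager, per-vport state must be sized for the host PF's potential VFs rather than for the local PCI function:
static void *alloc_per_vport_state(struct mlx5_core_dev *dev, size_t elem)
{
	/* covers the PF placeholder plus up to mlx5_core_max_vfs() VFs */
	return kcalloc(MLX5_TOTAL_VPORTS(dev), elem, GFP_KERNEL);
}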
static inline int mlx5_get_gid_table_len(u16 param)
{


@ -72,6 +72,7 @@ enum {
enum {
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
};
@ -141,6 +142,7 @@ enum {
MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
MLX5_CMD_OP_QUERY_HOST_PARAMS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
@ -831,7 +833,9 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
u8 reserved_at_e0[0x720];
struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;
u8 reserved_at_100[0x700];
};
struct mlx5_ifc_calc_op {
@ -4438,7 +4442,8 @@ struct mlx5_ifc_query_pages_out_bits {
u8 syndrome[0x20];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 num_pages[0x20];
@ -4457,7 +4462,8 @@ struct mlx5_ifc_query_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@ -5877,7 +5883,8 @@ struct mlx5_ifc_manage_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 input_num_entries[0x20];
@ -6055,7 +6062,8 @@ struct mlx5_ifc_enable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@ -6099,7 +6107,8 @@ struct mlx5_ifc_disable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x10];
u8 embedded_cpu_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@ -7817,21 +7826,23 @@ struct mlx5_ifc_ptys_reg_bits {
u8 proto_mask[0x3];
u8 an_status[0x4];
u8 reserved_at_24[0x3c];
u8 reserved_at_24[0x1c];
u8 ext_eth_proto_capability[0x20];
u8 eth_proto_capability[0x20];
u8 ib_link_width_capability[0x10];
u8 ib_proto_capability[0x10];
u8 reserved_at_a0[0x20];
u8 ext_eth_proto_admin[0x20];
u8 eth_proto_admin[0x20];
u8 ib_link_width_admin[0x10];
u8 ib_proto_admin[0x10];
u8 reserved_at_100[0x20];
u8 ext_eth_proto_oper[0x20];
u8 eth_proto_oper[0x20];
@ -8280,7 +8291,9 @@ struct mlx5_ifc_mpegc_reg_bits {
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x6d];
u8 rx_icrc_encapsulated_counter[0x1];
u8 reserved_at_6e[0x8];
u8 reserved_at_6e[0x4];
u8 ptys_extended_ethernet[0x1];
u8 reserved_at_73[0x3];
u8 pfcc_mask[0x1];
u8 reserved_at_77[0x3];
u8 per_lane_error_counters[0x1];
@ -8746,7 +8759,8 @@ struct mlx5_ifc_initial_seg_bits {
u8 initializing[0x1];
u8 reserved_at_fe1[0x4];
u8 nic_interface_supported[0x3];
u8 reserved_at_fe8[0x18];
u8 embedded_cpu[0x1];
u8 reserved_at_fe9[0x17];
struct mlx5_ifc_health_buffer_bits health_buffer;
@ -9513,4 +9527,44 @@ struct mlx5_ifc_mtrc_ctrl_bits {
u8 reserved_at_80[0x180];
};
struct mlx5_ifc_host_params_context_bits {
u8 host_number[0x8];
u8 reserved_at_8[0x8];
u8 host_num_of_vfs[0x10];
u8 reserved_at_20[0x10];
u8 host_pci_bus[0x10];
u8 reserved_at_40[0x10];
u8 host_pci_device[0x10];
u8 reserved_at_60[0x10];
u8 host_pci_function[0x10];
u8 reserved_at_80[0x180];
};
struct mlx5_ifc_query_host_params_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_query_host_params_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
struct mlx5_ifc_host_params_context_bits host_params_context;
u8 reserved_at_280[0x180];
};
#endif /* MLX5_IFC_H */
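A minimal sketch, assuming a core-side wrapper along these lines drives the new QUERY_HOST_PARAMS command with the structures above (the helper name is illustrative):
static int query_host_num_of_vfs(struct mlx5_core_dev *dev, u16 *num_vfs)
{
	u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {0};
	int err;
	MLX5_SET(query_host_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HOST_PARAMS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;
	*num_vfs = MLX5_GET(query_host_params_out, out,
			    host_params_context.host_num_of_vfs);
	return 0;
}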


@ -92,6 +92,22 @@ enum mlx5e_link_mode {
MLX5E_LINK_MODES_NUMBER,
};
enum mlx5e_ext_link_mode {
MLX5E_SGMII_100M = 0,
MLX5E_1000BASE_X_SGMII = 1,
MLX5E_5GBASE_R = 3,
MLX5E_10GBASE_XFI_XAUI_1 = 4,
MLX5E_40GBASE_XLAUI_4_XLPPI_4 = 5,
MLX5E_25GAUI_1_25GBASE_CR_KR = 6,
MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 = 7,
MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8,
MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9,
MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10,
MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
MLX5E_400GAUI_8 = 15,
MLX5E_EXT_LINK_MODES_NUMBER,
};
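A hypothetical ethtool-side translation sketch; the speeds follow from the mode names, with each AUI lane running at 50Gbps in the extended layout:
static u32 ext_link_mode_to_speed(enum mlx5e_ext_link_mode mode)
{
	switch (mode) {
	case MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR:
		return 50000;	/* one 50G lane */
	case MLX5E_100GAUI_2_100GBASE_CR2_KR2:
		return 100000;	/* two 50G lanes */
	case MLX5E_200GAUI_4_200GBASE_CR4_KR4:
		return 200000;	/* four 50G lanes */
	case MLX5E_400GAUI_8:
		return 400000;	/* eight 50G lanes */
	default:
		return 0;	/* modes not covered by this sketch */
	}
}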
enum mlx5e_connector_type {
MLX5E_PORT_UNKNOWN = 0,
MLX5E_PORT_NONE = 1,
@ -106,31 +122,23 @@ enum mlx5e_connector_type {
};
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
(ext ? MLX5_GET(reg, out, ext_##field) : \
MLX5_GET(reg, out, field))
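A short usage sketch (assumed helper) for the dual-layout accessor, gated on the ptys_extended_ethernet PCAM bit added in this series:
static int query_eth_proto_oper(struct mlx5_core_dev *dev, u32 *proto_oper)
{
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
	bool ext = MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet);
	int err;
	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (err)
		return err;
	/* picks ext_eth_proto_oper or eth_proto_oper as appropriate */
	*proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
	return 0;
}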
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port);
int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, u8 local_port);
int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
u32 *proto_oper, u8 local_port);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, int proto_mask);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);


@ -36,12 +36,25 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#define MLX5_VPORT_PF_PLACEHOLDER (1u)
#define MLX5_SPECIAL_VPORTS (MLX5_VPORT_PF_PLACEHOLDER)
#define MLX5_TOTAL_VPORTS(mdev) (MLX5_SPECIAL_VPORTS + mlx5_core_max_vfs(mdev))
#define MLX5_VPORT_MANAGER(mdev) \
(MLX5_CAP_GEN(mdev, vport_group_manager) && \
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
mlx5_core_is_pf(mdev))
enum {
MLX5_CAP_INLINE_MODE_L2,
MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
};
enum {
MLX5_VPORT_UPLINK = 0xffff
};
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state);
@ -60,7 +73,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u32 vport, u64 node_guid);
u16 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
@ -78,7 +91,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
u64 *node_guid);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u32 vport,
u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size);
@ -87,7 +100,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int list_size);
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
u32 vport,
u16 vport,
int *promisc_uc,
int *promisc_mc,
int *promisc_all);
@ -96,7 +109,7 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int promisc_mc,
int promisc_all);
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
u32 vport,
u16 vport,
u16 vlans[],
int *size);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,