Merge branch 'mlx5-devx' into wip/dl-for-next
From Yishai,

-----------------------------------

This series enriches DEVX support in a few aspects: it enables
interoperability between DEVX and verbs and improves the mechanism for
controlling privileged DEVX commands.

The first patch updates the mlx5 ifc file.

The next three patches enable modifying and querying verbs objects via
the DEVX interface. To achieve that, the core layer introduces the
'UVERBS_IDR_ANY_OBJECT' type to match any IDR object. Once it is used by
some driver's method, the infrastructure skips checking for the IDR
type, and it becomes the driver handler's responsibility. The DEVX
modify and query methods were changed to accept any object type via the
'UVERBS_IDR_ANY_OBJECT' mechanism; the type checking is done per object
as part of the driver code.

The following three patches introduce a more robust mechanism for
controlling privileged DEVX commands. The responsibility to block or
allow each command was moved to the firmware, based on the UID
credentials that the driver reports upon user context creation. This
enables finer granularity per command, based on the device security
model and the user's credentials. In addition, introducing a valid range
for 'general commands' removes the need to touch the driver's code
whenever a new command is added in the future.

The last patch fixes the XRC verbs flow once a DEVX context is used.
This is needed because an XRCD is a shared kernel resource, and as such
a kernel UID (=0) should be used in its related resources.

Thanks

Yishai Hadas

-----------------------------------

The top 6 patches are the mlx5-devx series; the remainder are from the
mlx5-next tree, as the mlx5-devx series depended on the mlx5-next
mlx5_ifc file update.

* mlx5-devx: (42 commits)
  IB/mlx5: Allow XRC usage via verbs in DEVX context
  IB/mlx5: Update the supported DEVX commands
  IB/mlx5: Enforce DEVX privilege by firmware
  IB/mlx5: Enable modify and query verbs objects via DEVX
  IB/core: Enable getting an object type from a given uobject
  IB/core: Introduce UVERBS_IDR_ANY_OBJECT
  net/mlx5: Update mlx5_ifc with DEVX UCTX capabilities bits
  RDMA/mlx5: Unfold modify RMP function
  RDMA/mlx5: Unfold create RMP function
  RDMA/mlx5: Initialize SRQ tables on mlx5_ib
  RDMA/mlx5: Update SRQ functions signatures to mlx5_ib format
  RDMA/mlx5: Use stages for callback to setup and release DEVX
  RDMA/mlx5: Remove SRQ signature global flag
  net/mlx5: Move SRQ functions to RDMA part
  net/mlx5: Remove references to local mlx5_core functions
  net/mlx5: Remove not-used lib/eq.h header file
  net/mlx5: Remove dead transobj code
  net/mlx5: Align SRQ licenses and copyright information
  net/mlx5: Debug print for forwarded async events
  net/mlx5: Forward SRQ resource events
  ...

Signed-off-by: Doug Ledford <dledford@redhat.com>
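For orientation, here is a condensed sketch of how the
'UVERBS_IDR_ANY_OBJECT' sentinel threads through the lookup path. It is
paraphrased from the rdma_core hunks in the diff below, with parameters
and error paths elided; it is not a drop-in patch:

    /* Sketch only -- condensed from the core hunks in this merge. */
    static inline const struct uverbs_api_object *
    uapi_get_object(struct uverbs_api *uapi, u16 object_id)
    {
        if (object_id == UVERBS_IDR_ANY_OBJECT)
            return ERR_PTR(-ENOMSG);   /* "match any object" marker */
        /* ... normal radix-tree lookup otherwise ... */
    }

    struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
                                               struct ib_uverbs_file *ufile, s64 id,
                                               enum rdma_lookup_mode mode)
    {
        if (IS_ERR(obj) && PTR_ERR(obj) == -ENOMSG)
            /* IDR type checking is skipped; the driver validates per object. */
            return lookup_get_idr_uobject(NULL, ufile, id, mode);
        /* ... typed lookup as before ... */
    }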
commit b2d8754f9a
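Similarly, a minimal sketch of the firmware-enforced privilege model
described above, condensed from the devx.c hunk below ('uctx' points into
the UCTX creation command mailbox):

    /* Sketch only: the driver reports the context's credentials once at
     * UCTX creation; firmware then allows/blocks privileged commands per
     * UID, instead of the driver filtering on CAP_NET_RAW per command. */
    u32 cap = 0;

    if (is_user && capable(CAP_NET_RAW) &&
        (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
        cap |= MLX5_UCTX_CAP_RAW_TX;

    MLX5_SET(uctx, uctx, cap, cap);   /* recorded by firmware for this UID */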
@ -398,16 +398,23 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
|
||||
struct ib_uobject *uobj;
|
||||
int ret;
|
||||
|
||||
if (!obj)
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (IS_ERR(obj) && PTR_ERR(obj) == -ENOMSG) {
|
||||
/* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
|
||||
uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
|
||||
if (IS_ERR(uobj))
|
||||
return uobj;
|
||||
} else {
|
||||
if (IS_ERR(obj))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
|
||||
if (IS_ERR(uobj))
|
||||
return uobj;
|
||||
uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
|
||||
if (IS_ERR(uobj))
|
||||
return uobj;
|
||||
|
||||
if (uobj->uapi_object != obj) {
|
||||
ret = -EINVAL;
|
||||
goto free;
|
||||
if (uobj->uapi_object != obj) {
|
||||
ret = -EINVAL;
|
||||
goto free;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -427,7 +434,7 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
|
||||
|
||||
return uobj;
|
||||
free:
|
||||
obj->type_class->lookup_put(uobj, mode);
|
||||
uobj->uapi_object->type_class->lookup_put(uobj, mode);
|
||||
uverbs_uobject_put(uobj);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
@ -491,7 +498,7 @@ struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
|
||||
{
|
||||
struct ib_uobject *ret;
|
||||
|
||||
if (!obj)
|
||||
if (IS_ERR(obj))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
/*
|
||||
|
@ -118,11 +118,6 @@ void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
|
||||
* Depending on ID the slot pointer in the radix tree points at one of these
|
||||
* structs.
|
||||
*/
|
||||
struct uverbs_api_object {
|
||||
const struct uverbs_obj_type *type_attrs;
|
||||
const struct uverbs_obj_type_class *type_class;
|
||||
u8 disabled:1;
|
||||
};
|
||||
|
||||
struct uverbs_api_ioctl_method {
|
||||
int(__rcu *handler)(struct uverbs_attr_bundle *attrs);
|
||||
@ -162,10 +157,24 @@ struct uverbs_api {
|
||||
const struct uverbs_api_write_method **write_ex_methods;
|
||||
};
|
||||
|
||||
/*
|
||||
* Get an uverbs_api_object that corresponds to the given object_id.
|
||||
* Note:
|
||||
* -ENOMSG means that any object is allowed to match during lookup.
|
||||
*/
|
||||
static inline const struct uverbs_api_object *
|
||||
uapi_get_object(struct uverbs_api *uapi, u16 object_id)
|
||||
{
|
||||
return radix_tree_lookup(&uapi->radix, uapi_key_obj(object_id));
|
||||
const struct uverbs_api_object *res;
|
||||
|
||||
if (object_id == UVERBS_IDR_ANY_OBJECT)
|
||||
return ERR_PTR(-ENOMSG);
|
||||
|
||||
res = radix_tree_lookup(&uapi->radix, uapi_key_obj(object_id));
|
||||
if (!res)
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
char *uapi_key_format(char *S, unsigned int key);
|
||||
|
@ -184,6 +184,7 @@ static int uapi_merge_obj_tree(struct uverbs_api *uapi,
|
||||
if (WARN_ON(obj_elm->type_attrs))
|
||||
return -EINVAL;
|
||||
|
||||
obj_elm->id = obj->id;
|
||||
obj_elm->type_attrs = obj->type_attrs;
|
||||
obj_elm->type_class = obj->type_attrs->type_class;
|
||||
/*
|
||||
@ -580,8 +581,13 @@ again:
|
||||
if (obj_key == UVERBS_API_KEY_ERR)
|
||||
continue;
|
||||
tmp_obj = uapi_get_object(uapi, obj_key);
|
||||
if (tmp_obj && !tmp_obj->disabled)
|
||||
continue;
|
||||
if (IS_ERR(tmp_obj)) {
|
||||
if (PTR_ERR(tmp_obj) == -ENOMSG)
|
||||
continue;
|
||||
} else {
|
||||
if (!tmp_obj->disabled)
|
||||
continue;
|
||||
}
|
||||
|
||||
starting_key = iter.index;
|
||||
uapi_remove_method(
|
||||
|
@ -1,6 +1,8 @@
|
||||
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
|
||||
|
||||
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
|
||||
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
|
||||
srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
|
||||
cong.o
|
||||
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
|
||||
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
|
||||
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
#include <rdma/ib_cache.h>
|
||||
#include "mlx5_ib.h"
|
||||
#include "srq.h"
|
||||
|
||||
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
|
||||
{
|
||||
@ -177,8 +178,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
|
||||
struct mlx5_core_srq *msrq = NULL;
|
||||
|
||||
if (qp->ibqp.xrcd) {
|
||||
msrq = mlx5_core_get_srq(dev->mdev,
|
||||
be32_to_cpu(cqe->srqn));
|
||||
msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
|
||||
srq = to_mibsrq(msrq);
|
||||
} else {
|
||||
srq = to_msrq(qp->ibqp.srq);
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <rdma/uverbs_ioctl.h>
|
||||
#include <rdma/mlx5_user_ioctl_cmds.h>
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/uverbs_std_types.h>
|
||||
#include <linux/mlx5/driver.h>
|
||||
#include <linux/mlx5/fs.h>
|
||||
#include "mlx5_ib.h"
|
||||
@ -46,24 +47,31 @@ devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
|
||||
return to_mucontext(ib_uverbs_get_ucontext(attrs));
|
||||
}
|
||||
|
||||
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev)
|
||||
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
|
||||
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
|
||||
u64 general_obj_types;
|
||||
void *hdr;
|
||||
void *hdr, *uctx;
|
||||
int err;
|
||||
u16 uid;
|
||||
u32 cap = 0;
|
||||
|
||||
hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
|
||||
uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
|
||||
|
||||
general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
|
||||
if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
|
||||
!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
|
||||
return -EINVAL;
|
||||
|
||||
if (is_user && capable(CAP_NET_RAW) &&
|
||||
(MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
|
||||
cap |= MLX5_UCTX_CAP_RAW_TX;
|
||||
|
||||
MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
|
||||
MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
|
||||
MLX5_SET(uctx, uctx, cap, cap);
|
||||
|
||||
err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
|
||||
if (err)
|
||||
@ -132,7 +140,7 @@ static u64 get_enc_obj_id(u16 opcode, u32 obj_id)
|
||||
return ((u64)opcode << 32) | obj_id;
|
||||
}
|
||||
|
||||
static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
|
||||
static u64 devx_get_obj_id(const void *in)
|
||||
{
|
||||
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
|
||||
u64 obj_id;
|
||||
@ -306,6 +314,8 @@ static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
|
||||
MLX5_GET(query_dct_in, in, dctn));
|
||||
break;
|
||||
case MLX5_CMD_OP_QUERY_XRQ:
|
||||
case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
|
||||
case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
|
||||
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
|
||||
MLX5_GET(query_xrq_in, in, xrqn));
|
||||
break;
|
||||
@ -332,17 +342,107 @@ static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
|
||||
MLX5_GET(drain_dct_in, in, dctn));
|
||||
break;
|
||||
case MLX5_CMD_OP_ARM_XRQ:
|
||||
case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
|
||||
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
|
||||
MLX5_GET(arm_xrq_in, in, xrqn));
|
||||
break;
|
||||
case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
|
||||
obj_id = get_enc_obj_id
|
||||
(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
|
||||
MLX5_GET(query_packet_reformat_context_in,
|
||||
in, packet_reformat_id));
|
||||
break;
|
||||
default:
|
||||
obj_id = 0;
|
||||
}
|
||||
|
||||
return obj_id;
|
||||
}
|
||||
|
||||
static bool devx_is_valid_obj_id(struct ib_uobject *uobj, const void *in)
|
||||
{
|
||||
u64 obj_id = devx_get_obj_id(in);
|
||||
|
||||
if (!obj_id)
|
||||
return false;
|
||||
|
||||
switch (uobj_get_object_id(uobj)) {
|
||||
case UVERBS_OBJECT_CQ:
|
||||
return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
|
||||
to_mcq(uobj->object)->mcq.cqn) ==
|
||||
obj_id;
|
||||
|
||||
case UVERBS_OBJECT_SRQ:
|
||||
{
|
||||
struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
|
||||
struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
|
||||
u16 opcode;
|
||||
|
||||
switch (srq->common.res) {
|
||||
case MLX5_RES_XSRQ:
|
||||
opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
|
||||
break;
|
||||
case MLX5_RES_XRQ:
|
||||
opcode = MLX5_CMD_OP_CREATE_XRQ;
|
||||
break;
|
||||
default:
|
||||
if (!dev->mdev->issi)
|
||||
opcode = MLX5_CMD_OP_CREATE_SRQ;
|
||||
else
|
||||
opcode = MLX5_CMD_OP_CREATE_RMP;
|
||||
}
|
||||
|
||||
return get_enc_obj_id(opcode,
|
||||
to_msrq(uobj->object)->msrq.srqn) ==
|
||||
obj_id;
|
||||
}
|
||||
|
||||
case UVERBS_OBJECT_QP:
|
||||
{
|
||||
struct mlx5_ib_qp *qp = to_mqp(uobj->object);
|
||||
enum ib_qp_type qp_type = qp->ibqp.qp_type;
|
||||
|
||||
if (qp_type == IB_QPT_RAW_PACKET ||
|
||||
(qp->flags & MLX5_IB_QP_UNDERLAY)) {
|
||||
struct mlx5_ib_raw_packet_qp *raw_packet_qp =
|
||||
&qp->raw_packet_qp;
|
||||
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
|
||||
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
|
||||
|
||||
return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
|
||||
rq->base.mqp.qpn) == obj_id ||
|
||||
get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
|
||||
sq->base.mqp.qpn) == obj_id ||
|
||||
get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
|
||||
rq->tirn) == obj_id ||
|
||||
get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
|
||||
sq->tisn) == obj_id);
|
||||
}
|
||||
|
||||
if (qp_type == MLX5_IB_QPT_DCT)
|
||||
return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
|
||||
qp->dct.mdct.mqp.qpn) == obj_id;
|
||||
|
||||
return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
|
||||
qp->ibqp.qp_num) == obj_id;
|
||||
}
|
||||
|
||||
case UVERBS_OBJECT_WQ:
|
||||
return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
|
||||
to_mrwq(uobj->object)->core_qp.qpn) ==
|
||||
obj_id;
|
||||
|
||||
case UVERBS_OBJECT_RWQ_IND_TBL:
|
||||
return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
|
||||
to_mrwq_ind_table(uobj->object)->rqtn) ==
|
||||
obj_id;
|
||||
|
||||
case MLX5_IB_OBJECT_DEVX_OBJ:
|
||||
return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
|
||||
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
if (obj_id == obj->obj_id)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void devx_set_umem_valid(const void *in)
|
||||
@ -510,6 +610,7 @@ static bool devx_is_obj_modify_cmd(const void *in)
|
||||
case MLX5_CMD_OP_DRAIN_DCT:
|
||||
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
|
||||
case MLX5_CMD_OP_ARM_XRQ:
|
||||
case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
|
||||
return true;
|
||||
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
|
||||
{
|
||||
@ -551,6 +652,9 @@ static bool devx_is_obj_query_cmd(const void *in)
|
||||
case MLX5_CMD_OP_QUERY_XRC_SRQ:
|
||||
case MLX5_CMD_OP_QUERY_DCT:
|
||||
case MLX5_CMD_OP_QUERY_XRQ:
|
||||
case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
|
||||
case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
|
||||
case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
@ -588,15 +692,16 @@ static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
|
||||
if (!c->devx_uid)
|
||||
return -EINVAL;
|
||||
|
||||
if (!capable(CAP_NET_RAW))
|
||||
return -EPERM;
|
||||
|
||||
return c->devx_uid;
|
||||
}
|
||||
static bool devx_is_general_cmd(void *in)
|
||||
{
|
||||
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
|
||||
|
||||
if (opcode >= MLX5_CMD_OP_GENERAL_START &&
|
||||
opcode < MLX5_CMD_OP_GENERAL_END)
|
||||
return true;
|
||||
|
||||
switch (opcode) {
|
||||
case MLX5_CMD_OP_QUERY_HCA_CAP:
|
||||
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
|
||||
@ -994,7 +1099,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
|
||||
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
|
||||
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
|
||||
struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
|
||||
struct devx_obj *obj = uobj->object;
|
||||
struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
|
||||
void *cmd_out;
|
||||
int err;
|
||||
int uid;
|
||||
@ -1006,7 +1111,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
|
||||
if (!devx_is_obj_modify_cmd(cmd_in))
|
||||
return -EINVAL;
|
||||
|
||||
if (!devx_is_valid_obj_id(obj, cmd_in))
|
||||
if (!devx_is_valid_obj_id(uobj, cmd_in))
|
||||
return -EINVAL;
|
||||
|
||||
cmd_out = uverbs_zalloc(attrs, cmd_out_len);
|
||||
@ -1016,7 +1121,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
|
||||
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
|
||||
devx_set_umem_valid(cmd_in);
|
||||
|
||||
err = mlx5_cmd_exec(obj->mdev, cmd_in,
|
||||
err = mlx5_cmd_exec(mdev->mdev, cmd_in,
|
||||
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
|
||||
cmd_out, cmd_out_len);
|
||||
if (err)
|
||||
@ -1035,10 +1140,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
|
||||
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
|
||||
MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
|
||||
struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
|
||||
struct devx_obj *obj = uobj->object;
|
||||
void *cmd_out;
|
||||
int err;
|
||||
int uid;
|
||||
struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
|
||||
|
||||
uid = devx_get_uid(c, cmd_in);
|
||||
if (uid < 0)
|
||||
@ -1047,7 +1152,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
|
||||
if (!devx_is_obj_query_cmd(cmd_in))
|
||||
return -EINVAL;
|
||||
|
||||
if (!devx_is_valid_obj_id(obj, cmd_in))
|
||||
if (!devx_is_valid_obj_id(uobj, cmd_in))
|
||||
return -EINVAL;
|
||||
|
||||
cmd_out = uverbs_zalloc(attrs, cmd_out_len);
|
||||
@ -1055,7 +1160,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
|
||||
return PTR_ERR(cmd_out);
|
||||
|
||||
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
|
||||
err = mlx5_cmd_exec(obj->mdev, cmd_in,
|
||||
err = mlx5_cmd_exec(mdev->mdev, cmd_in,
|
||||
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
|
||||
cmd_out, cmd_out_len);
|
||||
if (err)
|
||||
@ -1155,9 +1260,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
|
||||
if (!c->devx_uid)
|
||||
return -EINVAL;
|
||||
|
||||
if (!capable(CAP_NET_RAW))
|
||||
return -EPERM;
|
||||
|
||||
obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
|
||||
if (!obj)
|
||||
return -ENOMEM;
|
||||
@ -1293,7 +1395,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
|
||||
DECLARE_UVERBS_NAMED_METHOD(
|
||||
MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
|
||||
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
|
||||
MLX5_IB_OBJECT_DEVX_OBJ,
|
||||
UVERBS_IDR_ANY_OBJECT,
|
||||
UVERBS_ACCESS_WRITE,
|
||||
UA_MANDATORY),
|
||||
UVERBS_ATTR_PTR_IN(
|
||||
@ -1309,7 +1411,7 @@ DECLARE_UVERBS_NAMED_METHOD(
|
||||
DECLARE_UVERBS_NAMED_METHOD(
|
||||
MLX5_IB_METHOD_DEVX_OBJ_QUERY,
|
||||
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
|
||||
MLX5_IB_OBJECT_DEVX_OBJ,
|
||||
UVERBS_IDR_ANY_OBJECT,
|
||||
UVERBS_ACCESS_READ,
|
||||
UA_MANDATORY),
|
||||
UVERBS_ATTR_PTR_IN(
|
||||
|
@ -4,6 +4,7 @@
|
||||
*/
|
||||
|
||||
#include "ib_rep.h"
|
||||
#include "srq.h"
|
||||
|
||||
static const struct mlx5_ib_profile rep_profile = {
|
||||
STAGE_CREATE(MLX5_IB_STAGE_INIT,
|
||||
@ -21,6 +22,9 @@ static const struct mlx5_ib_profile rep_profile = {
|
||||
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
|
||||
mlx5_ib_stage_rep_roce_init,
|
||||
mlx5_ib_stage_rep_roce_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
|
||||
mlx5_init_srq_table,
|
||||
mlx5_cleanup_srq_table),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
|
||||
mlx5_ib_stage_dev_res_init,
|
||||
mlx5_ib_stage_dev_res_cleanup),
|
||||
|
@ -60,6 +60,7 @@
|
||||
#include "mlx5_ib.h"
|
||||
#include "ib_rep.h"
|
||||
#include "cmd.h"
|
||||
#include "srq.h"
|
||||
#include <linux/mlx5/fs_helpers.h>
|
||||
#include <linux/mlx5/accel.h>
|
||||
#include <rdma/uverbs_std_types.h>
|
||||
@ -82,10 +83,13 @@ static char mlx5_version[] =
|
||||
|
||||
struct mlx5_ib_event_work {
|
||||
struct work_struct work;
|
||||
struct mlx5_core_dev *dev;
|
||||
void *context;
|
||||
enum mlx5_dev_event event;
|
||||
unsigned long param;
|
||||
union {
|
||||
struct mlx5_ib_dev *dev;
|
||||
struct mlx5_ib_multiport_info *mpi;
|
||||
};
|
||||
bool is_slave;
|
||||
unsigned int event;
|
||||
void *param;
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -1759,7 +1763,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
|
||||
#endif
|
||||
|
||||
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
|
||||
err = mlx5_ib_devx_create(dev);
|
||||
err = mlx5_ib_devx_create(dev, true);
|
||||
if (err < 0)
|
||||
goto out_uars;
|
||||
context->devx_uid = err;
|
||||
@ -4244,6 +4248,63 @@ static void delay_drop_handler(struct work_struct *work)
|
||||
mutex_unlock(&delay_drop->lock);
|
||||
}
|
||||
|
||||
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
|
||||
struct ib_event *ibev)
|
||||
{
|
||||
switch (eqe->sub_type) {
|
||||
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
|
||||
schedule_work(&ibdev->delay_drop.delay_drop_work);
|
||||
break;
|
||||
default: /* do nothing */
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
|
||||
struct ib_event *ibev)
|
||||
{
|
||||
u8 port = (eqe->data.port.port >> 4) & 0xf;
|
||||
|
||||
ibev->element.port_num = port;
|
||||
|
||||
switch (eqe->sub_type) {
|
||||
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
|
||||
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
|
||||
case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
|
||||
/* In RoCE, port up/down events are handled in
|
||||
* mlx5_netdev_event().
|
||||
*/
|
||||
if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
|
||||
IB_LINK_LAYER_ETHERNET)
|
||||
return -EINVAL;
|
||||
|
||||
ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
|
||||
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
|
||||
break;
|
||||
|
||||
case MLX5_PORT_CHANGE_SUBTYPE_LID:
|
||||
ibev->event = IB_EVENT_LID_CHANGE;
|
||||
break;
|
||||
|
||||
case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
|
||||
ibev->event = IB_EVENT_PKEY_CHANGE;
|
||||
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
|
||||
break;
|
||||
|
||||
case MLX5_PORT_CHANGE_SUBTYPE_GUID:
|
||||
ibev->event = IB_EVENT_GID_CHANGE;
|
||||
break;
|
||||
|
||||
case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
|
||||
ibev->event = IB_EVENT_CLIENT_REREGISTER;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mlx5_ib_handle_event(struct work_struct *_work)
|
||||
{
|
||||
struct mlx5_ib_event_work *work =
|
||||
@ -4251,65 +4312,37 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
|
||||
struct mlx5_ib_dev *ibdev;
|
||||
struct ib_event ibev;
|
||||
bool fatal = false;
|
||||
u8 port = (u8)work->param;
|
||||
|
||||
if (mlx5_core_is_mp_slave(work->dev)) {
|
||||
ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
|
||||
if (work->is_slave) {
|
||||
ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
|
||||
if (!ibdev)
|
||||
goto out;
|
||||
} else {
|
||||
ibdev = work->context;
|
||||
ibdev = work->dev;
|
||||
}
|
||||
|
||||
switch (work->event) {
|
||||
case MLX5_DEV_EVENT_SYS_ERROR:
|
||||
ibev.event = IB_EVENT_DEVICE_FATAL;
|
||||
mlx5_ib_handle_internal_error(ibdev);
|
||||
ibev.element.port_num = (u8)(unsigned long)work->param;
|
||||
fatal = true;
|
||||
break;
|
||||
|
||||
case MLX5_DEV_EVENT_PORT_UP:
|
||||
case MLX5_DEV_EVENT_PORT_DOWN:
|
||||
case MLX5_DEV_EVENT_PORT_INITIALIZED:
|
||||
/* In RoCE, port up/down events are handled in
|
||||
* mlx5_netdev_event().
|
||||
*/
|
||||
if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
|
||||
IB_LINK_LAYER_ETHERNET)
|
||||
case MLX5_EVENT_TYPE_PORT_CHANGE:
|
||||
if (handle_port_change(ibdev, work->param, &ibev))
|
||||
goto out;
|
||||
|
||||
ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
|
||||
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
|
||||
break;
|
||||
|
||||
case MLX5_DEV_EVENT_LID_CHANGE:
|
||||
ibev.event = IB_EVENT_LID_CHANGE;
|
||||
break;
|
||||
|
||||
case MLX5_DEV_EVENT_PKEY_CHANGE:
|
||||
ibev.event = IB_EVENT_PKEY_CHANGE;
|
||||
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
|
||||
break;
|
||||
|
||||
case MLX5_DEV_EVENT_GUID_CHANGE:
|
||||
ibev.event = IB_EVENT_GID_CHANGE;
|
||||
break;
|
||||
|
||||
case MLX5_DEV_EVENT_CLIENT_REREG:
|
||||
ibev.event = IB_EVENT_CLIENT_REREGISTER;
|
||||
break;
|
||||
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
|
||||
schedule_work(&ibdev->delay_drop.delay_drop_work);
|
||||
goto out;
|
||||
case MLX5_EVENT_TYPE_GENERAL_EVENT:
|
||||
handle_general_event(ibdev, work->param, &ibev);
|
||||
/* fall through */
|
||||
default:
|
||||
goto out;
|
||||
}
|
||||
|
||||
ibev.device = &ibdev->ib_dev;
|
||||
ibev.element.port_num = port;
|
||||
ibev.device = &ibdev->ib_dev;
|
||||
|
||||
if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
|
||||
mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
|
||||
if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
|
||||
mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -4322,22 +4355,43 @@ out:
|
||||
kfree(work);
|
||||
}
|
||||
|
||||
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
|
||||
enum mlx5_dev_event event, unsigned long param)
|
||||
static int mlx5_ib_event(struct notifier_block *nb,
|
||||
unsigned long event, void *param)
|
||||
{
|
||||
struct mlx5_ib_event_work *work;
|
||||
|
||||
work = kmalloc(sizeof(*work), GFP_ATOMIC);
|
||||
if (!work)
|
||||
return;
|
||||
return NOTIFY_DONE;
|
||||
|
||||
INIT_WORK(&work->work, mlx5_ib_handle_event);
|
||||
work->dev = dev;
|
||||
work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
|
||||
work->is_slave = false;
|
||||
work->param = param;
|
||||
work->context = context;
|
||||
work->event = event;
|
||||
|
||||
queue_work(mlx5_ib_event_wq, &work->work);
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static int mlx5_ib_event_slave_port(struct notifier_block *nb,
|
||||
unsigned long event, void *param)
|
||||
{
|
||||
struct mlx5_ib_event_work *work;
|
||||
|
||||
work = kmalloc(sizeof(*work), GFP_ATOMIC);
|
||||
if (!work)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
INIT_WORK(&work->work, mlx5_ib_handle_event);
|
||||
work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
|
||||
work->is_slave = true;
|
||||
work->param = param;
|
||||
work->event = event;
|
||||
queue_work(mlx5_ib_event_wq, &work->work);
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
|
||||
@ -5360,6 +5414,11 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
|
||||
spin_unlock(&port->mp.mpi_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mpi->mdev_events.notifier_call)
|
||||
mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
|
||||
mpi->mdev_events.notifier_call = NULL;
|
||||
|
||||
mpi->ibdev = NULL;
|
||||
|
||||
spin_unlock(&port->mp.mpi_lock);
|
||||
@ -5415,6 +5474,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
|
||||
|
||||
ibdev->port[port_num].mp.mpi = mpi;
|
||||
mpi->ibdev = ibdev;
|
||||
mpi->mdev_events.notifier_call = NULL;
|
||||
spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
|
||||
|
||||
err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
|
||||
@ -5432,6 +5492,9 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
|
||||
goto unbind;
|
||||
}
|
||||
|
||||
mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
|
||||
mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
|
||||
|
||||
err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
|
||||
if (err)
|
||||
goto unbind;
|
||||
@ -6155,6 +6218,34 @@ static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
|
||||
mlx5_ib_unregister_vport_reps(dev);
|
||||
}
|
||||
|
||||
static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
dev->mdev_events.notifier_call = mlx5_ib_event;
|
||||
mlx5_notifier_register(dev->mdev, &dev->mdev_events);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
|
||||
}
|
||||
|
||||
static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
int uid;
|
||||
|
||||
uid = mlx5_ib_devx_create(dev, false);
|
||||
if (uid > 0)
|
||||
dev->devx_whitelist_uid = uid;
|
||||
|
||||
return 0;
|
||||
}
|
||||
static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
if (dev->devx_whitelist_uid)
|
||||
mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
|
||||
}
|
||||
|
||||
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
|
||||
const struct mlx5_ib_profile *profile,
|
||||
int stage)
|
||||
@ -6166,8 +6257,6 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
|
||||
profile->stage[stage].cleanup(dev);
|
||||
}
|
||||
|
||||
if (dev->devx_whitelist_uid)
|
||||
mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
|
||||
ib_dealloc_device((struct ib_device *)dev);
|
||||
}
|
||||
|
||||
@ -6176,7 +6265,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
|
||||
{
|
||||
int err;
|
||||
int i;
|
||||
int uid;
|
||||
|
||||
for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
|
||||
if (profile->stage[i].init) {
|
||||
@ -6186,10 +6274,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
|
||||
}
|
||||
}
|
||||
|
||||
uid = mlx5_ib_devx_create(dev);
|
||||
if (uid > 0)
|
||||
dev->devx_whitelist_uid = uid;
|
||||
|
||||
dev->profile = profile;
|
||||
dev->ib_active = true;
|
||||
|
||||
@ -6217,9 +6301,15 @@ static const struct mlx5_ib_profile pf_profile = {
|
||||
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
|
||||
mlx5_ib_stage_roce_init,
|
||||
mlx5_ib_stage_roce_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
|
||||
mlx5_init_srq_table,
|
||||
mlx5_cleanup_srq_table),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
|
||||
mlx5_ib_stage_dev_res_init,
|
||||
mlx5_ib_stage_dev_res_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
|
||||
mlx5_ib_stage_dev_notifier_init,
|
||||
mlx5_ib_stage_dev_notifier_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_ODP,
|
||||
mlx5_ib_stage_odp_init,
|
||||
mlx5_ib_stage_odp_cleanup),
|
||||
@ -6238,6 +6328,9 @@ static const struct mlx5_ib_profile pf_profile = {
|
||||
STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
|
||||
NULL,
|
||||
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
|
||||
mlx5_ib_stage_devx_init,
|
||||
mlx5_ib_stage_devx_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
|
||||
mlx5_ib_stage_ib_reg_init,
|
||||
mlx5_ib_stage_ib_reg_cleanup),
|
||||
@ -6265,9 +6358,15 @@ static const struct mlx5_ib_profile nic_rep_profile = {
|
||||
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
|
||||
mlx5_ib_stage_rep_roce_init,
|
||||
mlx5_ib_stage_rep_roce_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_SRQ,
|
||||
mlx5_init_srq_table,
|
||||
mlx5_cleanup_srq_table),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
|
||||
mlx5_ib_stage_dev_res_init,
|
||||
mlx5_ib_stage_dev_res_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
|
||||
mlx5_ib_stage_dev_notifier_init,
|
||||
mlx5_ib_stage_dev_notifier_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
|
||||
mlx5_ib_stage_counters_init,
|
||||
mlx5_ib_stage_counters_cleanup),
|
||||
@ -6385,7 +6484,6 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
|
||||
static struct mlx5_interface mlx5_ib_interface = {
|
||||
.add = mlx5_ib_add,
|
||||
.remove = mlx5_ib_remove,
|
||||
.event = mlx5_ib_event,
|
||||
.protocol = MLX5_INTERFACE_PROTOCOL_IB,
|
||||
};
|
||||
|
||||
|
@ -41,7 +41,6 @@
|
||||
#include <linux/mlx5/cq.h>
|
||||
#include <linux/mlx5/fs.h>
|
||||
#include <linux/mlx5/qp.h>
|
||||
#include <linux/mlx5/srq.h>
|
||||
#include <linux/mlx5/fs.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/mlx5/transobj.h>
|
||||
@ -50,6 +49,8 @@
|
||||
#include <rdma/uverbs_ioctl.h>
|
||||
#include <rdma/mlx5_user_ioctl_cmds.h>
|
||||
|
||||
#include "srq.h"
|
||||
|
||||
#define mlx5_ib_dbg(_dev, format, arg...) \
|
||||
dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
|
||||
__LINE__, current->pid, ##arg)
|
||||
@ -542,7 +543,6 @@ struct mlx5_ib_srq {
|
||||
struct mlx5_ib_xrcd {
|
||||
struct ib_xrcd ibxrcd;
|
||||
u32 xrcdn;
|
||||
u16 uid;
|
||||
};
|
||||
|
||||
enum mlx5_ib_mtt_access_flags {
|
||||
@ -776,13 +776,16 @@ enum mlx5_ib_stages {
|
||||
MLX5_IB_STAGE_CAPS,
|
||||
MLX5_IB_STAGE_NON_DEFAULT_CB,
|
||||
MLX5_IB_STAGE_ROCE,
|
||||
MLX5_IB_STAGE_SRQ,
|
||||
MLX5_IB_STAGE_DEVICE_RESOURCES,
|
||||
MLX5_IB_STAGE_DEVICE_NOTIFIER,
|
||||
MLX5_IB_STAGE_ODP,
|
||||
MLX5_IB_STAGE_COUNTERS,
|
||||
MLX5_IB_STAGE_CONG_DEBUGFS,
|
||||
MLX5_IB_STAGE_UAR,
|
||||
MLX5_IB_STAGE_BFREG,
|
||||
MLX5_IB_STAGE_PRE_IB_REG_UMR,
|
||||
MLX5_IB_STAGE_WHITELIST_UID,
|
||||
MLX5_IB_STAGE_IB_REG,
|
||||
MLX5_IB_STAGE_POST_IB_REG_UMR,
|
||||
MLX5_IB_STAGE_DELAY_DROP,
|
||||
@ -807,6 +810,7 @@ struct mlx5_ib_multiport_info {
|
||||
struct list_head list;
|
||||
struct mlx5_ib_dev *ibdev;
|
||||
struct mlx5_core_dev *mdev;
|
||||
struct notifier_block mdev_events;
|
||||
struct completion unref_comp;
|
||||
u64 sys_image_guid;
|
||||
u32 mdev_refcnt;
|
||||
@ -893,6 +897,7 @@ struct mlx5_ib_pf_eq {
|
||||
struct mlx5_ib_dev {
|
||||
struct ib_device ib_dev;
|
||||
struct mlx5_core_dev *mdev;
|
||||
struct notifier_block mdev_events;
|
||||
struct mlx5_roce roce[MLX5_MAX_PORTS];
|
||||
int num_ports;
|
||||
/* serialize update of capability mask
|
||||
@ -938,6 +943,7 @@ struct mlx5_ib_dev {
|
||||
u64 sys_image_guid;
|
||||
struct mlx5_memic memic;
|
||||
u16 devx_whitelist_uid;
|
||||
struct mlx5_srq_table srq_table;
|
||||
};
|
||||
|
||||
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
|
||||
@ -1261,7 +1267,7 @@ void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
|
||||
u8 port_num);
|
||||
|
||||
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
|
||||
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev);
|
||||
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
|
||||
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
|
||||
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
|
||||
extern const struct uapi_definition mlx5_ib_devx_defs[];
|
||||
@ -1276,7 +1282,8 @@ int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
|
||||
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
|
||||
#else
|
||||
static inline int
|
||||
mlx5_ib_devx_create(struct mlx5_ib_dev *dev) { return -EOPNOTSUPP; };
|
||||
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
|
||||
bool is_user) { return -EOPNOTSUPP; }
|
||||
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
|
||||
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
|
||||
int *dest_type)
|
||||
|
@ -775,6 +775,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
|
||||
__be64 *pas;
|
||||
void *qpc;
|
||||
int err;
|
||||
u16 uid;
|
||||
|
||||
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
|
||||
if (err) {
|
||||
@ -836,7 +837,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
|
||||
goto err_umem;
|
||||
}
|
||||
|
||||
MLX5_SET(create_qp_in, *in, uid, to_mpd(pd)->uid);
|
||||
uid = (attr->qp_type != IB_QPT_XRC_TGT) ? to_mpd(pd)->uid : 0;
|
||||
MLX5_SET(create_qp_in, *in, uid, uid);
|
||||
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
|
||||
if (ubuffer->umem)
|
||||
mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
|
||||
@ -5514,7 +5516,6 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
|
||||
struct mlx5_ib_dev *dev = to_mdev(ibdev);
|
||||
struct mlx5_ib_xrcd *xrcd;
|
||||
int err;
|
||||
u16 uid;
|
||||
|
||||
if (!MLX5_CAP_GEN(dev->mdev, xrc))
|
||||
return ERR_PTR(-ENOSYS);
|
||||
@ -5523,14 +5524,12 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
|
||||
if (!xrcd)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
uid = context ? to_mucontext(context)->devx_uid : 0;
|
||||
err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, uid);
|
||||
err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
|
||||
if (err) {
|
||||
kfree(xrcd);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
xrcd->uid = uid;
|
||||
return &xrcd->ibxrcd;
|
||||
}
|
||||
|
||||
@ -5538,10 +5537,9 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
|
||||
{
|
||||
struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
|
||||
u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
|
||||
u16 uid = to_mxrcd(xrcd)->uid;
|
||||
int err;
|
||||
|
||||
err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, uid);
|
||||
err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
|
||||
if (err)
|
||||
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
|
||||
|
||||
|
@ -1,46 +1,15 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
|
||||
/*
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
* Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/mlx5/qp.h>
|
||||
#include <linux/mlx5/srq.h>
|
||||
#include <linux/slab.h>
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
|
||||
#include "mlx5_ib.h"
|
||||
|
||||
/* not supported currently */
|
||||
static int srq_signature;
|
||||
#include "srq.h"
|
||||
|
||||
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
|
||||
{
|
||||
@ -144,7 +113,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
|
||||
|
||||
in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
|
||||
in->page_offset = offset;
|
||||
in->uid = to_mpd(pd)->uid;
|
||||
in->uid = (in->type != IB_SRQT_XRC) ? to_mpd(pd)->uid : 0;
|
||||
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
|
||||
in->type != IB_SRQT_BASIC)
|
||||
in->user_index = uidx;
|
||||
@ -206,7 +175,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
|
||||
err = -ENOMEM;
|
||||
goto err_in;
|
||||
}
|
||||
srq->wq_sig = !!srq_signature;
|
||||
srq->wq_sig = 0;
|
||||
|
||||
in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
|
||||
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
|
||||
@ -331,7 +300,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
|
||||
|
||||
in.pd = to_mpd(pd)->pdn;
|
||||
in.db_record = srq->db.dma;
|
||||
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
|
||||
err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
|
||||
kvfree(in.pas);
|
||||
if (err) {
|
||||
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
|
||||
@ -355,7 +324,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
|
||||
return &srq->ibsrq;
|
||||
|
||||
err_core:
|
||||
mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
|
||||
mlx5_cmd_destroy_srq(dev, &srq->msrq);
|
||||
|
||||
err_usr_kern_srq:
|
||||
if (pd->uobject)
|
||||
@ -385,7 +354,7 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&srq->mutex);
|
||||
ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
|
||||
ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
|
||||
mutex_unlock(&srq->mutex);
|
||||
|
||||
if (ret)
|
||||
@ -406,7 +375,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
|
||||
if (!out)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
|
||||
ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
|
||||
if (ret)
|
||||
goto out_box;
|
||||
|
||||
@ -424,7 +393,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
|
||||
struct mlx5_ib_dev *dev = to_mdev(srq->device);
|
||||
struct mlx5_ib_srq *msrq = to_msrq(srq);
|
||||
|
||||
mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);
|
||||
mlx5_cmd_destroy_srq(dev, &msrq->msrq);
|
||||
|
||||
if (srq->uobject) {
|
||||
mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
|
||||
|
73
drivers/infiniband/hw/mlx5/srq.h
Normal file
73
drivers/infiniband/hw/mlx5/srq.h
Normal file
@ -0,0 +1,73 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
|
||||
/*
|
||||
* Copyright (c) 2013-2018, Mellanox Technologies. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef MLX5_IB_SRQ_H
|
||||
#define MLX5_IB_SRQ_H
|
||||
|
||||
enum {
|
||||
MLX5_SRQ_FLAG_ERR = (1 << 0),
|
||||
MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
|
||||
MLX5_SRQ_FLAG_RNDV = (1 << 2),
|
||||
};
|
||||
|
||||
struct mlx5_srq_attr {
|
||||
u32 type;
|
||||
u32 flags;
|
||||
u32 log_size;
|
||||
u32 wqe_shift;
|
||||
u32 log_page_size;
|
||||
u32 wqe_cnt;
|
||||
u32 srqn;
|
||||
u32 xrcd;
|
||||
u32 page_offset;
|
||||
u32 cqn;
|
||||
u32 pd;
|
||||
u32 lwm;
|
||||
u32 user_index;
|
||||
u64 db_record;
|
||||
__be64 *pas;
|
||||
u32 tm_log_list_size;
|
||||
u32 tm_next_tag;
|
||||
u32 tm_hw_phase_cnt;
|
||||
u32 tm_sw_phase_cnt;
|
||||
u16 uid;
|
||||
};
|
||||
|
||||
struct mlx5_ib_dev;
|
||||
|
||||
struct mlx5_core_srq {
|
||||
struct mlx5_core_rsc_common common; /* must be first */
|
||||
u32 srqn;
|
||||
int max;
|
||||
size_t max_gs;
|
||||
size_t max_avail_gather;
|
||||
int wqe_shift;
|
||||
void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
|
||||
|
||||
atomic_t refcount;
|
||||
struct completion free;
|
||||
u16 uid;
|
||||
};
|
||||
|
||||
struct mlx5_srq_table {
|
||||
struct notifier_block nb;
|
||||
/* protect radix tree
|
||||
*/
|
||||
spinlock_t lock;
|
||||
struct radix_tree_root tree;
|
||||
};
|
||||
|
||||
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_srq_attr *in);
|
||||
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
|
||||
int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_srq_attr *out);
|
||||
int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
u16 lwm, int is_srq);
|
||||
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn);
|
||||
|
||||
int mlx5_init_srq_table(struct mlx5_ib_dev *dev);
|
||||
void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev);
|
||||
#endif /* MLX5_IB_SRQ_H */
|
@ -1,67 +1,13 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
|
||||
/*
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
* Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mlx5/driver.h>
|
||||
#include <linux/mlx5/cmd.h>
|
||||
#include <linux/mlx5/srq.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include "mlx5_core.h"
|
||||
#include <linux/mlx5/transobj.h>
|
||||
|
||||
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
|
||||
{
|
||||
struct mlx5_srq_table *table = &dev->priv.srq_table;
|
||||
struct mlx5_core_srq *srq;
|
||||
|
||||
spin_lock(&table->lock);
|
||||
|
||||
srq = radix_tree_lookup(&table->tree, srqn);
|
||||
if (srq)
|
||||
atomic_inc(&srq->refcount);
|
||||
|
||||
spin_unlock(&table->lock);
|
||||
|
||||
if (!srq) {
|
||||
mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
|
||||
return;
|
||||
}
|
||||
|
||||
srq->event(srq, event_type);
|
||||
|
||||
if (atomic_dec_and_test(&srq->refcount))
|
||||
complete(&srq->free);
|
||||
}
|
||||
#include "mlx5_ib.h"
|
||||
#include "srq.h"
|
||||
|
||||
static int get_pas_size(struct mlx5_srq_attr *in)
|
||||
{
|
||||
@ -132,9 +78,9 @@ static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
|
||||
in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
|
||||
}
|
||||
|
||||
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
|
||||
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
|
||||
{
|
||||
struct mlx5_srq_table *table = &dev->priv.srq_table;
|
||||
struct mlx5_srq_table *table = &dev->srq_table;
|
||||
struct mlx5_core_srq *srq;
|
||||
|
||||
spin_lock(&table->lock);
|
||||
@ -147,9 +93,8 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
|
||||
|
||||
return srq;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_get_srq);
|
||||
|
||||
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_srq_attr *in)
|
||||
{
|
||||
u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
|
||||
@ -176,7 +121,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
MLX5_SET(create_srq_in, create_in, opcode,
|
||||
MLX5_CMD_OP_CREATE_SRQ);
|
||||
|
||||
err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
|
||||
err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
|
||||
sizeof(create_out));
|
||||
kvfree(create_in);
|
||||
if (!err) {
|
||||
@ -187,8 +132,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
return err;
|
||||
}
|
||||
|
||||
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_srq *srq)
|
||||
static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
|
||||
{
|
||||
u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
|
||||
u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
|
||||
@ -198,11 +142,11 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
|
||||
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
|
||||
MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
|
||||
|
||||
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
|
||||
srq_out, sizeof(srq_out));
|
||||
return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
|
||||
sizeof(srq_out));
|
||||
}
|
||||
|
||||
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
u16 lwm, int is_srq)
|
||||
{
|
||||
u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
|
||||
@ -214,11 +158,11 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
|
||||
MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
|
||||
|
||||
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
|
||||
srq_out, sizeof(srq_out));
|
||||
return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
|
||||
sizeof(srq_out));
|
||||
}
|
||||
|
||||
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_srq_attr *out)
|
||||
{
|
||||
u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
|
||||
@ -233,8 +177,8 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
MLX5_SET(query_srq_in, srq_in, opcode,
|
||||
MLX5_CMD_OP_QUERY_SRQ);
|
||||
MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
|
||||
err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
|
||||
srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
|
||||
err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
|
||||
MLX5_ST_SZ_BYTES(query_srq_out));
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
@ -247,7 +191,7 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
|
||||
static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
|
||||
struct mlx5_core_srq *srq,
|
||||
struct mlx5_srq_attr *in)
|
||||
{
|
||||
@ -277,7 +221,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
|
||||
MLX5_CMD_OP_CREATE_XRC_SRQ);
|
||||
|
||||
memset(create_out, 0, sizeof(create_out));
|
||||
err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
|
||||
err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
|
||||
sizeof(create_out));
|
||||
if (err)
|
||||
goto out;
|
||||
@ -289,7 +233,7 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
|
||||
static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
|
||||
struct mlx5_core_srq *srq)
|
||||
{
|
||||
u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
|
||||
@ -300,12 +244,12 @@ static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
|
||||
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
|
||||
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
|
||||
|
||||
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
|
||||
return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
|
||||
xrcsrq_out, sizeof(xrcsrq_out));
|
||||
}
|
||||
|
||||
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_srq *srq, u16 lwm)
|
||||
static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
u16 lwm)
|
||||
{
|
||||
u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
|
||||
u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
|
||||
@ -316,11 +260,11 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
|
||||
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
|
||||
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
|
||||
|
||||
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
|
||||
return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
|
||||
xrcsrq_out, sizeof(xrcsrq_out));
|
||||
}
|
||||
|
||||
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
|
||||
static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
|
||||
struct mlx5_core_srq *srq,
|
||||
struct mlx5_srq_attr *out)
|
||||
{
|
||||
@ -338,8 +282,8 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
|
||||
MLX5_CMD_OP_QUERY_XRC_SRQ);
|
||||
MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
|
||||
|
||||
err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
|
||||
MLX5_ST_SZ_BYTES(query_xrc_srq_out));
|
||||
err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
|
||||
xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
@ -354,21 +298,27 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_srq_attr *in)
|
||||
{
|
||||
void *create_in;
|
||||
void *create_out = NULL;
|
||||
void *create_in = NULL;
|
||||
void *rmpc;
|
||||
void *wq;
|
||||
int pas_size;
|
||||
int outlen;
|
||||
int inlen;
|
||||
int err;
|
||||
|
||||
pas_size = get_pas_size(in);
|
||||
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
|
||||
outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
|
||||
create_in = kvzalloc(inlen, GFP_KERNEL);
|
||||
if (!create_in)
|
||||
return -ENOMEM;
|
||||
create_out = kvzalloc(outlen, GFP_KERNEL);
|
||||
if (!create_in || !create_out) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
|
||||
wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
|
||||
@ -378,16 +328,20 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
set_wq(wq, in);
|
||||
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
|
||||
|
||||
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
|
||||
if (!err)
|
||||
MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
|
||||
err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
|
||||
if (!err) {
|
||||
srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
|
||||
srq->uid = in->uid;
|
||||
}
|
||||
|
||||
out:
|
||||
kvfree(create_in);
|
||||
kvfree(create_out);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_srq *srq)
|
||||
static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
|
||||
u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};
|
||||
@ -395,22 +349,30 @@ static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
|
||||
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
|
||||
MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
|
||||
MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
|
||||
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
|
||||
return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
|
||||
}
|
||||
|
||||
static int arm_rmp_cmd(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_srq *srq,
|
||||
static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
u16 lwm)
|
||||
{
|
||||
void *in;
|
||||
void *out = NULL;
|
||||
void *in = NULL;
|
||||
void *rmpc;
|
||||
void *wq;
|
||||
void *bitmask;
|
||||
int outlen;
|
||||
int inlen;
|
||||
int err;
|
||||
|
||||
in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
|
||||
if (!in)
|
||||
return -ENOMEM;
|
||||
inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
|
||||
outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);
|
||||
|
||||
in = kvzalloc(inlen, GFP_KERNEL);
|
||||
out = kvzalloc(outlen, GFP_KERNEL);
|
||||
if (!in || !out) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
|
||||
bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
|
||||
@ -422,25 +384,39 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
|
||||
MLX5_SET(wq, wq, lwm, lwm);
|
||||
MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
|
||||
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
|
||||
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
|
||||
|
||||
err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
|
||||
err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
|
||||
|
||||
out:
|
||||
kvfree(in);
|
||||
kvfree(out);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
    u32 *rmp_out;
    u32 *rmp_out = NULL;
    u32 *rmp_in = NULL;
    void *rmpc;
    int outlen;
    int inlen;
    int err;

    rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL);
    if (!rmp_out)
        return -ENOMEM;
    outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
    inlen = MLX5_ST_SZ_BYTES(query_rmp_in);

    err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
    rmp_out = kvzalloc(outlen, GFP_KERNEL);
    rmp_in = kvzalloc(inlen, GFP_KERNEL);
    if (!rmp_out || !rmp_in) {
        err = -ENOMEM;
        goto out;
    }

    MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
    MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
    err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
    if (err)
        goto out;

@ -451,10 +427,11 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,

out:
    kvfree(rmp_out);
    kvfree(rmp_in);
    return err;
}

static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
    u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
@ -489,7 +466,7 @@ static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    MLX5_SET(xrqc, xrqc, cqn, in->cqn);
    MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
    MLX5_SET(create_xrq_in, create_in, uid, in->uid);
    err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
    err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
                        sizeof(create_out));
    kvfree(create_in);
    if (!err) {
@ -500,7 +477,7 @@ static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    return err;
}

static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
    u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
    u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};
@ -509,10 +486,10 @@ static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
    MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
    MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

    return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int arm_xrq_cmd(struct mlx5_core_dev *dev,
static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
                       struct mlx5_core_srq *srq,
                       u16 lwm)
{
@ -525,10 +502,10 @@ static int arm_xrq_cmd(struct mlx5_core_dev *dev,
    MLX5_SET(arm_rq_in, in, lwm, lwm);
    MLX5_SET(arm_rq_in, in, uid, srq->uid);

    return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int query_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
    u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
@ -544,7 +521,7 @@ static int query_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
    MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

    err = mlx5_cmd_exec(dev, in, sizeof(in), xrq_out, outlen);
    err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
    if (err)
        goto out;

@ -567,11 +544,10 @@ out:
    return err;
}

static int create_srq_split(struct mlx5_core_dev *dev,
                            struct mlx5_core_srq *srq,
static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                            struct mlx5_srq_attr *in)
{
    if (!dev->issi)
    if (!dev->mdev->issi)
        return create_srq_cmd(dev, srq, in);
    switch (srq->common.res) {
    case MLX5_RES_XSRQ:
@ -583,10 +559,9 @@ static int create_srq_split(struct mlx5_core_dev *dev,
    }
}

static int destroy_srq_split(struct mlx5_core_dev *dev,
                             struct mlx5_core_srq *srq)
static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
    if (!dev->issi)
    if (!dev->mdev->issi)
        return destroy_srq_cmd(dev, srq);
    switch (srq->common.res) {
    case MLX5_RES_XSRQ:
@ -598,11 +573,11 @@ static int destroy_srq_split(struct mlx5_core_dev *dev,
    }
}

int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *in)
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *in)
{
    struct mlx5_srq_table *table = &dev->srq_table;
    int err;
    struct mlx5_srq_table *table = &dev->priv.srq_table;

    switch (in->type) {
    case IB_SRQT_XRC:
@ -625,10 +600,8 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
    spin_lock_irq(&table->lock);
    err = radix_tree_insert(&table->tree, srq->srqn, srq);
    spin_unlock_irq(&table->lock);
    if (err) {
        mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
    if (err)
        goto err_destroy_srq_split;
    }

    return 0;

@ -637,25 +610,18 @@ err_destroy_srq_split:

    return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);

int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
    struct mlx5_srq_table *table = &dev->priv.srq_table;
    struct mlx5_srq_table *table = &dev->srq_table;
    struct mlx5_core_srq *tmp;
    int err;

    spin_lock_irq(&table->lock);
    tmp = radix_tree_delete(&table->tree, srq->srqn);
    spin_unlock_irq(&table->lock);
    if (!tmp) {
        mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
    if (!tmp || tmp != srq)
        return -EINVAL;
    }
    if (tmp != srq) {
        mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
        return -EINVAL;
    }

    err = destroy_srq_split(dev, srq);
    if (err)
@ -667,12 +633,11 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)

    return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);

int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *out)
int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                       struct mlx5_srq_attr *out)
{
    if (!dev->issi)
    if (!dev->mdev->issi)
        return query_srq_cmd(dev, srq, out);
    switch (srq->common.res) {
    case MLX5_RES_XSRQ:
@ -683,12 +648,11 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
        return query_rmp_cmd(dev, srq, out);
    }
}
EXPORT_SYMBOL(mlx5_core_query_srq);

int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq)
int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                     u16 lwm, int is_srq)
{
    if (!dev->issi)
    if (!dev->mdev->issi)
        return arm_srq_cmd(dev, srq, lwm, is_srq);
    switch (srq->common.res) {
    case MLX5_RES_XSRQ:
@ -699,18 +663,60 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
        return arm_rmp_cmd(dev, srq, lwm);
    }
}
EXPORT_SYMBOL(mlx5_core_arm_srq);

void mlx5_init_srq_table(struct mlx5_core_dev *dev)
static int srq_event_notifier(struct notifier_block *nb,
                              unsigned long type, void *data)
{
    struct mlx5_srq_table *table = &dev->priv.srq_table;
    struct mlx5_srq_table *table;
    struct mlx5_core_srq *srq;
    struct mlx5_eqe *eqe;
    u32 srqn;

    if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
        type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
        return NOTIFY_DONE;

    table = container_of(nb, struct mlx5_srq_table, nb);

    eqe = data;
    srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;

    spin_lock(&table->lock);

    srq = radix_tree_lookup(&table->tree, srqn);
    if (srq)
        atomic_inc(&srq->refcount);

    spin_unlock(&table->lock);

    if (!srq)
        return NOTIFY_OK;

    srq->event(srq, eqe->type);

    if (atomic_dec_and_test(&srq->refcount))
        complete(&srq->free);

    return NOTIFY_OK;
}

int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
    struct mlx5_srq_table *table = &dev->srq_table;

    memset(table, 0, sizeof(*table));
    spin_lock_init(&table->lock);
    INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);

    table->nb.notifier_call = srq_event_notifier;
    mlx5_notifier_register(dev->mdev, &table->nb);

    return 0;
}

void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
    /* nothing */
    struct mlx5_srq_table *table = &dev->srq_table;

    mlx5_notifier_unregister(dev->mdev, &table->nb);
}
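
A note on the conversion above (illustrative, not part of the patch): srq_event_notifier() must keep the SRQ alive while dispatching, since it runs concurrently with mlx5_cmd_destroy_srq(). A minimal sketch of that lookup-then-reference idiom, with hypothetical helper names but only the types and primitives already used here:

static struct mlx5_core_srq *srq_event_get(struct mlx5_srq_table *table,
                                           u32 srqn)
{
    struct mlx5_core_srq *srq;

    spin_lock(&table->lock);
    srq = radix_tree_lookup(&table->tree, srqn);
    if (srq)
        atomic_inc(&srq->refcount); /* pin before dropping the lock */
    spin_unlock(&table->lock);

    return srq;
}

static void srq_event_put(struct mlx5_core_srq *srq)
{
    /* last reference gone: unblock a destroy path waiting on srq->free */
    if (atomic_dec_and_test(&srq->refcount))
        complete(&srq->free);
}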
@ -12,9 +12,9 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
# mlx5 core basic
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
        health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
        health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
        mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
        fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
        fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
        diag/fs_tracepoint.o diag/fw_tracer.o

#
@ -40,9 +40,11 @@
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"

enum {
    CMD_IF_REV = 5,
@ -805,6 +807,8 @@ static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
    return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

static void cb_timeout_handler(struct work_struct *work)
{
    struct delayed_work *dwork = container_of(work, struct delayed_work,
@ -1412,14 +1416,32 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
    up(&cmd->sem);
}

static int cmd_comp_notifier(struct notifier_block *nb,
                             unsigned long type, void *data)
{
    struct mlx5_core_dev *dev;
    struct mlx5_cmd *cmd;
    struct mlx5_eqe *eqe;

    cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
    dev = container_of(cmd, struct mlx5_core_dev, cmd);
    eqe = data;

    mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);

    return NOTIFY_OK;
}
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
    MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
    mlx5_eq_notifier_register(dev, &dev->cmd.nb);
    mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
    mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
    mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@ -1435,7 +1457,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
    }
}

void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
    struct mlx5_cmd *cmd = &dev->cmd;
    struct mlx5_cmd_work_ent *ent;
@ -1533,7 +1555,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
        }
    }
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
    unsigned long flags;
    u64 vector;

    /* wait for pending handlers to complete */
    mlx5_eq_synchronize_cmd_irq(dev);
    spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
    vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
    if (!vector)
        goto no_trig;

    vector |= MLX5_TRIGGERED_CMD_COMP;
    spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

    mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
    mlx5_cmd_comp_handler(dev, vector, true);
    return;

no_trig:
    spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

static int status_to_err(u8 status)
{
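
The cmd_comp_notifier() conversion above follows the mlx5_nb pattern this series introduces: MLX5_NB_INIT() binds a handler to one EQE type, mlx5_eq_notifier_register() hooks it into the per-type chain, and mlx5_nb_cof() recovers the embedding structure inside the callback. A hedged sketch of a hypothetical consumer ("my_feature" is invented; the mlx5 helpers are the ones used in this patch):

struct my_feature {
    struct mlx5_core_dev *mdev;
    struct mlx5_nb nb;
};

static int my_feature_cmd_event(struct notifier_block *nb,
                                unsigned long type, void *data)
{
    struct my_feature *feat = mlx5_nb_cof(nb, struct my_feature, nb);
    struct mlx5_eqe *eqe = data; /* EQE of type MLX5_EVENT_TYPE_CMD */

    mlx5_core_dbg(feat->mdev, "cmd vector 0x%x\n",
                  be32_to_cpu(eqe->data.cmd.vector));
    return NOTIFY_OK;
}

static void my_feature_start(struct my_feature *feat)
{
    MLX5_NB_INIT(&feat->nb, my_feature_cmd_event, CMD);
    mlx5_eq_notifier_register(feat->mdev, &feat->nb);
}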
@ -45,75 +45,11 @@ struct mlx5_device_context {
    unsigned long state;
};

struct mlx5_delayed_event {
    struct list_head list;
    struct mlx5_core_dev *dev;
    enum mlx5_dev_event event;
    unsigned long param;
};

enum {
    MLX5_INTERFACE_ADDED,
    MLX5_INTERFACE_ATTACHED,
};

static void add_delayed_event(struct mlx5_priv *priv,
                              struct mlx5_core_dev *dev,
                              enum mlx5_dev_event event,
                              unsigned long param)
{
    struct mlx5_delayed_event *delayed_event;

    delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
    if (!delayed_event) {
        mlx5_core_err(dev, "event %d is missed\n", event);
        return;
    }

    mlx5_core_dbg(dev, "Accumulating event %d\n", event);
    delayed_event->dev = dev;
    delayed_event->event = event;
    delayed_event->param = param;
    list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}

static void delayed_event_release(struct mlx5_device_context *dev_ctx,
                                  struct mlx5_priv *priv)
{
    struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
    struct mlx5_delayed_event *de;
    struct mlx5_delayed_event *n;
    struct list_head temp;

    INIT_LIST_HEAD(&temp);

    spin_lock_irq(&priv->ctx_lock);

    priv->is_accum_events = false;
    list_splice_init(&priv->waiting_events_list, &temp);
    if (!dev_ctx->context)
        goto out;
    list_for_each_entry_safe(de, n, &temp, list)
        dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);

out:
    spin_unlock_irq(&priv->ctx_lock);

    list_for_each_entry_safe(de, n, &temp, list) {
        list_del(&de->list);
        kfree(de);
    }
}

/* accumulating events that can come after mlx5_ib calls to
 * ib_register_device, till adding that interface to the events list.
 */
static void delayed_event_start(struct mlx5_priv *priv)
{
    spin_lock_irq(&priv->ctx_lock);
    priv->is_accum_events = true;
    spin_unlock_irq(&priv->ctx_lock);
}

void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
@ -129,8 +65,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)

    dev_ctx->intf = intf;

    delayed_event_start(priv);

    dev_ctx->context = intf->add(dev);
    if (dev_ctx->context) {
        set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@ -142,8 +76,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
        spin_unlock_irq(&priv->ctx_lock);
    }

    delayed_event_release(dev_ctx, priv);

    if (!dev_ctx->context)
        kfree(dev_ctx);
}
@ -187,26 +119,20 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
    if (!dev_ctx)
        return;

    delayed_event_start(priv);
    if (intf->attach) {
        if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
            goto out;
            return;
        if (intf->attach(dev, dev_ctx->context))
            goto out;

            return;
        set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
    } else {
        if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
            goto out;
            return;
        dev_ctx->context = intf->add(dev);
        if (!dev_ctx->context)
            goto out;

            return;
        set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
    }

out:
    delayed_event_release(dev_ctx, priv);
}

void mlx5_attach_device(struct mlx5_core_dev *dev)
@ -402,30 +328,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
    return res;
}

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                     unsigned long param)
{
    struct mlx5_priv *priv = &dev->priv;
    struct mlx5_device_context *dev_ctx;
    unsigned long flags;

    spin_lock_irqsave(&priv->ctx_lock, flags);

    if (priv->is_accum_events)
        add_delayed_event(priv, dev, event, param);

    /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
     * still in priv->ctx_list. In this case, only notify the dev_ctx if its
     * ADDED or ATTACHED bit are set.
     */
    list_for_each_entry(dev_ctx, &priv->ctx_list, list)
        if (dev_ctx->intf->event &&
            (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
             test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
            dev_ctx->intf->event(dev, dev_ctx->context, event, param);

    spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

void mlx5_dev_list_lock(void)
{
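
With the delayed-event machinery deleted above, nothing is buffered on an interface's behalf anymore: a consumer that cares about events attaches a plain notifier_block through mlx5_notifier_register() as soon as it is added, and detaches it itself. A sketch of the replacement contract, with a hypothetical consumer ("my_if" is invented):

struct my_if_priv {
    struct notifier_block events_nb;
};

static int my_if_event(struct notifier_block *nb, unsigned long event,
                       void *data)
{
    /* 'event' is the raw eqe->type, 'data' the forwarded struct mlx5_eqe */
    return NOTIFY_DONE; /* not ours: let other subscribers look at it */
}

static void my_if_attach(struct mlx5_core_dev *mdev, struct my_if_priv *priv)
{
    priv->events_nb.notifier_call = my_if_event;
    mlx5_notifier_register(mdev, &priv->events_nb); /* no replay of old events */
}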
@ -30,6 +30,7 @@
 * SOFTWARE.
 */
#define CREATE_TRACE_POINTS
#include "lib/eq.h"
#include "fw_tracer.h"
#include "fw_tracer_tracepoint.h"

@ -846,9 +847,9 @@ free_tracer:
    return ERR_PTR(err);
}

/* Create HW resources + start tracer
 * must be called before Async EQ is created
 */
static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data);

/* Create HW resources + start tracer */
int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
{
    struct mlx5_core_dev *dev;
@ -874,6 +875,9 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
        goto err_dealloc_pd;
    }

    MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
    mlx5_eq_notifier_register(dev, &tracer->nb);

    mlx5_fw_tracer_start(tracer);

    return 0;
@ -883,9 +887,7 @@ err_dealloc_pd:
    return err;
}

/* Stop tracer + Cleanup HW resources
 * must be called after Async EQ is destroyed
 */
/* Stop tracer + Cleanup HW resources */
void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
{
    if (IS_ERR_OR_NULL(tracer))
@ -893,7 +895,7 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)

    mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
                  tracer->owner);

    mlx5_eq_notifier_unregister(tracer->dev, &tracer->nb);
    cancel_work_sync(&tracer->ownership_change_work);
    cancel_work_sync(&tracer->handle_traces_work);

@ -922,12 +924,11 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
    kfree(tracer);
}

void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
{
    struct mlx5_fw_tracer *tracer = dev->tracer;

    if (!tracer)
        return;
    struct mlx5_fw_tracer *tracer = mlx5_nb_cof(nb, struct mlx5_fw_tracer, nb);
    struct mlx5_core_dev *dev = tracer->dev;
    struct mlx5_eqe *eqe = data;

    switch (eqe->sub_type) {
    case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
@ -942,6 +943,8 @@ void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
        mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
                      eqe->sub_type);
    }

    return NOTIFY_OK;
}

EXPORT_TRACEPOINT_SYMBOL(mlx5_fw);
@ -55,6 +55,7 @@

struct mlx5_fw_tracer {
    struct mlx5_core_dev *dev;
    struct mlx5_nb nb;
    bool owner;
    u8 trc_ver;
    struct workqueue_struct *work_queue;
@ -170,6 +171,5 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev);
int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);

#endif
@ -631,7 +631,6 @@ struct mlx5e_channel_stats {
} ____cacheline_aligned_in_smp;

enum {
    MLX5E_STATE_ASYNC_EVENTS_ENABLED,
    MLX5E_STATE_OPENED,
    MLX5E_STATE_DESTROYING,
};
@ -690,6 +689,8 @@ struct mlx5e_priv {
    struct hwtstamp_config tstamp;
    u16 q_counter;
    u16 drop_rq_q_counter;
    struct notifier_block events_nb;

#ifdef CONFIG_MLX5_CORE_EN_DCB
    struct mlx5e_dcbx dcbx;
#endif
@ -294,33 +294,35 @@ void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
    queue_work(priv->wq, &priv->update_stats_work);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
    struct mlx5e_priv *priv = vpriv;
    struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
    struct mlx5_eqe *eqe = data;

    if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
        return;
    if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
        return NOTIFY_DONE;

    switch (event) {
    case MLX5_DEV_EVENT_PORT_UP:
    case MLX5_DEV_EVENT_PORT_DOWN:
    switch (eqe->sub_type) {
    case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
    case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
        queue_work(priv->wq, &priv->update_carrier_work);
        break;
    default:
        break;
        return NOTIFY_DONE;
    }

    return NOTIFY_OK;
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
    set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
    priv->events_nb.notifier_call = async_event;
    mlx5_notifier_register(priv->mdev, &priv->events_nb);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
    clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
    mlx5_eq_synchronize_async_irq(priv->mdev);
    mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}

static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
@ -5170,7 +5172,6 @@ static struct mlx5_interface mlx5e_interface = {
    .remove = mlx5e_remove,
    .attach = mlx5e_attach,
    .detach = mlx5e_detach,
    .event = mlx5e_async_event,
    .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
    .get_dev = mlx5e_get_netdev,
};
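
Two properties of async_event() above generalize to any subscriber on this chain: the chain delivers every forwarded event type, so the handler filters and returns NOTIFY_DONE for foreign types, and it runs in atomic (notifier) context, so real work is deferred to a workqueue. A reduced sketch of that contract; all "my_*" names are hypothetical:

struct my_priv {
    struct notifier_block nb;
    struct workqueue_struct *wq;
    struct work_struct link_work;
};

static int my_link_event(struct notifier_block *nb, unsigned long event,
                         void *data)
{
    struct my_priv *priv = container_of(nb, struct my_priv, nb);

    if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
        return NOTIFY_DONE; /* chain carries all forwarded types */

    queue_work(priv->wq, &priv->link_work); /* atomic context: defer */
    return NOTIFY_OK;
}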
@ -30,6 +30,7 @@
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
@ -1120,15 +1121,17 @@ static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                    int idx)
{
    struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
    struct mlx5_pme_stats pme_stats;
    int i;

    mlx5_get_pme_stats(priv->mdev, &pme_stats);

    for (i = 0; i < NUM_PME_STATUS_STATS; i++)
        data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
        data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
                                           mlx5e_pme_status_desc, i);

    for (i = 0; i < NUM_PME_ERR_STATS; i++)
        data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
        data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
                                           mlx5e_pme_error_desc, i);

    return idx;
@ -31,6 +31,7 @@
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
@ -68,8 +69,13 @@ struct mlx5_irq_info {
struct mlx5_eq_table {
    struct list_head comp_eqs_list;
    struct mlx5_eq pages_eq;
    struct mlx5_eq async_eq;
    struct mlx5_eq cmd_eq;
    struct mlx5_eq async_eq;

    struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

    /* Since CQ DB is stored in async_eq */
    struct mlx5_nb cq_err_nb;

    struct mutex lock; /* sync async eqs creations */
    int num_comp_vectors;
@ -102,121 +108,6 @@ static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
    return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static const char *eqe_type_str(u8 type)
{
    switch (type) {
    case MLX5_EVENT_TYPE_COMP:
        return "MLX5_EVENT_TYPE_COMP";
    case MLX5_EVENT_TYPE_PATH_MIG:
        return "MLX5_EVENT_TYPE_PATH_MIG";
    case MLX5_EVENT_TYPE_COMM_EST:
        return "MLX5_EVENT_TYPE_COMM_EST";
    case MLX5_EVENT_TYPE_SQ_DRAINED:
        return "MLX5_EVENT_TYPE_SQ_DRAINED";
    case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
        return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
    case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
        return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
    case MLX5_EVENT_TYPE_CQ_ERROR:
        return "MLX5_EVENT_TYPE_CQ_ERROR";
    case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
        return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
    case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
        return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
    case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
    case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
        return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
    case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
        return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
    case MLX5_EVENT_TYPE_INTERNAL_ERROR:
        return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
    case MLX5_EVENT_TYPE_PORT_CHANGE:
        return "MLX5_EVENT_TYPE_PORT_CHANGE";
    case MLX5_EVENT_TYPE_GPIO_EVENT:
        return "MLX5_EVENT_TYPE_GPIO_EVENT";
    case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
        return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
    case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
        return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
    case MLX5_EVENT_TYPE_REMOTE_CONFIG:
        return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
    case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
        return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
    case MLX5_EVENT_TYPE_STALL_EVENT:
        return "MLX5_EVENT_TYPE_STALL_EVENT";
    case MLX5_EVENT_TYPE_CMD:
        return "MLX5_EVENT_TYPE_CMD";
    case MLX5_EVENT_TYPE_PAGE_REQUEST:
        return "MLX5_EVENT_TYPE_PAGE_REQUEST";
    case MLX5_EVENT_TYPE_PAGE_FAULT:
        return "MLX5_EVENT_TYPE_PAGE_FAULT";
    case MLX5_EVENT_TYPE_PPS_EVENT:
        return "MLX5_EVENT_TYPE_PPS_EVENT";
    case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
        return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
    case MLX5_EVENT_TYPE_FPGA_ERROR:
        return "MLX5_EVENT_TYPE_FPGA_ERROR";
    case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
        return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
    case MLX5_EVENT_TYPE_GENERAL_EVENT:
        return "MLX5_EVENT_TYPE_GENERAL_EVENT";
    case MLX5_EVENT_TYPE_DEVICE_TRACER:
        return "MLX5_EVENT_TYPE_DEVICE_TRACER";
    default:
        return "Unrecognized event";
    }
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
    switch (subtype) {
    case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
        return MLX5_DEV_EVENT_PORT_DOWN;
    case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
        return MLX5_DEV_EVENT_PORT_UP;
    case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
        return MLX5_DEV_EVENT_PORT_INITIALIZED;
    case MLX5_PORT_CHANGE_SUBTYPE_LID:
        return MLX5_DEV_EVENT_LID_CHANGE;
    case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
        return MLX5_DEV_EVENT_PKEY_CHANGE;
    case MLX5_PORT_CHANGE_SUBTYPE_GUID:
        return MLX5_DEV_EVENT_GUID_CHANGE;
    case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
        return MLX5_DEV_EVENT_CLIENT_REREG;
    }
    return -1;
}

static void general_event_handler(struct mlx5_core_dev *dev,
                                  struct mlx5_eqe *eqe)
{
    switch (eqe->sub_type) {
    case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
        if (dev->event)
            dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
        break;
    default:
        mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
                      eqe->sub_type);
    }
}

static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
                                    struct mlx5_eqe *eqe)
{
    u64 value_lsb;
    u64 value_msb;

    value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
    value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);

    mlx5_core_warn(dev,
                   "High temperature on sensors with bit set %llx %llx",
                   value_msb, value_lsb);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
@ -232,20 +123,6 @@ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
    return cq;
}

static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
{
    struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);

    if (unlikely(!cq)) {
        mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
        return;
    }

    cq->event(cq, event_type);

    mlx5_cq_put(cq);
}

static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
{
    struct mlx5_eq_comp *eq_comp = eq_ptr;
@ -316,12 +193,13 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
{
    struct mlx5_eq *eq = eq_ptr;
    struct mlx5_core_dev *dev = eq->dev;
    struct mlx5_eq_table *eqt;
    struct mlx5_core_dev *dev;
    struct mlx5_eqe *eqe;
    int set_ci = 0;
    u32 cqn = -1;
    u32 rsn;
    u8 port;

    dev = eq->dev;
    eqt = dev->priv.eq_table;

    while ((eqe = next_eqe_sw(eq))) {
        /*
@ -330,112 +208,12 @@ static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
         */
        dma_rmb();

        mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                      eq->eqn, eqe_type_str(eqe->type));
        switch (eqe->type) {
        case MLX5_EVENT_TYPE_DCT_DRAINED:
            rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
            rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
            mlx5_rsc_event(dev, rsn, eqe->type);
            break;
        case MLX5_EVENT_TYPE_PATH_MIG:
        case MLX5_EVENT_TYPE_COMM_EST:
        case MLX5_EVENT_TYPE_SQ_DRAINED:
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
            rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
            rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
            mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                          eqe_type_str(eqe->type), eqe->type, rsn);
            mlx5_rsc_event(dev, rsn, eqe->type);
            break;
        if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
            atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
        else
            mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);

        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
            rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
            mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                          eqe_type_str(eqe->type), eqe->type, rsn);
            mlx5_srq_event(dev, rsn, eqe->type);
            break;

        case MLX5_EVENT_TYPE_CMD:
            mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
            break;

        case MLX5_EVENT_TYPE_PORT_CHANGE:
            port = (eqe->data.port.port >> 4) & 0xf;
            switch (eqe->sub_type) {
            case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
            case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
            case MLX5_PORT_CHANGE_SUBTYPE_LID:
            case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
            case MLX5_PORT_CHANGE_SUBTYPE_GUID:
            case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
            case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                if (dev->event)
                    dev->event(dev, port_subtype_event(eqe->sub_type),
                               (unsigned long)port);
                break;
            default:
                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                               port, eqe->sub_type);
            }
            break;
        case MLX5_EVENT_TYPE_CQ_ERROR:
            cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
            mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                           cqn, eqe->data.cq_err.syndrome);
            mlx5_eq_cq_event(eq, cqn, eqe->type);
            break;

        case MLX5_EVENT_TYPE_PAGE_REQUEST:
            {
                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                              func_id, npages);
                mlx5_core_req_pages_handler(dev, func_id, npages);
            }
            break;

        case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
            mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
            break;

        case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
            mlx5_port_module_event(dev, eqe);
            break;

        case MLX5_EVENT_TYPE_PPS_EVENT:
            mlx5_pps_event(dev, eqe);
            break;

        case MLX5_EVENT_TYPE_FPGA_ERROR:
        case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
            mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
            break;

        case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
            mlx5_temp_warning_event(dev, eqe);
            break;

        case MLX5_EVENT_TYPE_GENERAL_EVENT:
            general_event_handler(dev, eqe);
            break;

        case MLX5_EVENT_TYPE_DEVICE_TRACER:
            mlx5_fw_tracer_event(dev, eqe);
            break;

        default:
            mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                           eqe->type, eq->eqn);
            break;
        }
        atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

        ++eq->cons_index;
        ++set_ci;
@ -625,7 +403,7 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
    struct mlx5_eq_table *eq_table;
    int err;
    int i, err;

    eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
    if (!eq_table)
@ -638,6 +416,8 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
        goto kvfree_eq_table;

    mutex_init(&eq_table->lock);
    for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
        ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

    return 0;

@ -684,6 +464,38 @@ static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
    return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
                                 unsigned long type, void *data)
{
    struct mlx5_eq_table *eqt;
    struct mlx5_core_cq *cq;
    struct mlx5_eqe *eqe;
    struct mlx5_eq *eq;
    u32 cqn;

    /* type == MLX5_EVENT_TYPE_CQ_ERROR */

    eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
    eq = &eqt->async_eq;
    eqe = data;

    cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
    mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                   cqn, eqe->data.cq_err.syndrome);

    cq = mlx5_eq_cq_get(eq, cqn);
    if (unlikely(!cq)) {
        mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
        return NOTIFY_OK;
    }

    cq->event(cq, type);

    mlx5_cq_put(cq);

    return NOTIFY_OK;
}

static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
{
    u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
@ -724,6 +536,9 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
    struct mlx5_eq_param param = {};
    int err;

    MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
    mlx5_eq_notifier_register(dev, &table->cq_err_nb);

    param = (struct mlx5_eq_param) {
        .index = MLX5_EQ_CMD_IDX,
        .mask = 1ull << MLX5_EVENT_TYPE_CMD,
@ -734,7 +549,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
    err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, &param);
    if (err) {
        mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
        return err;
        goto err0;
    }

    mlx5_cmd_use_events(dev);
@ -773,6 +588,8 @@ err2:
err1:
    mlx5_cmd_use_polling(dev);
    destroy_async_eq(dev, &table->cmd_eq);
err0:
    mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
    return err;
}

@ -790,12 +607,15 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
    if (err)
        mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
                      err);

    mlx5_cmd_use_polling(dev);

    err = destroy_async_eq(dev, &table->cmd_eq);
    if (err)
        mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
                      err);

    mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@ -1202,3 +1022,23 @@ void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
    destroy_async_eqs(dev);
    free_irq_vectors(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
    struct mlx5_eq_table *eqt = dev->priv.eq_table;

    if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
        return -EINVAL;

    return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
    struct mlx5_eq_table *eqt = dev->priv.eq_table;

    if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
        return -EINVAL;

    return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
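
The data structure behind these two helpers is the heart of the rework: one atomic notifier head per EQE type plus a catch-all slot, so mlx5_eq_async_int() fans events out without the old switch statement. A generic sketch of the scheme with stand-in constants (mlx5 itself uses MLX5_EVENT_TYPE_MAX and MLX5_EVENT_TYPE_NOTIFY_ANY):

#define EVT_MAX 0x100         /* stand-in for MLX5_EVENT_TYPE_MAX */
#define EVT_ANY (EVT_MAX - 1) /* stand-in for MLX5_EVENT_TYPE_NOTIFY_ANY */

static struct atomic_notifier_head evt_nh[EVT_MAX];

static void evt_table_init(void)
{
    int i;

    for (i = 0; i < EVT_MAX; i++)
        ATOMIC_INIT_NOTIFIER_HEAD(&evt_nh[i]);
}

static void evt_dispatch(u8 type, void *eqe)
{
    /* exact-type subscribers first (u8 type is always < EVT_MAX here) ... */
    atomic_notifier_call_chain(&evt_nh[type], type, eqe);
    /* ... then the catch-all chain, as mlx5_eq_async_int() does */
    atomic_notifier_call_chain(&evt_nh[EVT_ANY], type, eqe);
}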
@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
#include "lib/eq.h"
@ -1568,7 +1569,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
    /* Mark this vport as disabled to discard new events */
    vport->enabled = false;

    mlx5_eq_synchronize_async_irq(esw->dev);
    /* Wait for current already scheduled events to complete */
    flush_workqueue(esw->work_queue);
    /* Disable events from this vport */
@ -1594,10 +1594,25 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
    mutex_unlock(&esw->state_lock);
}

static int eswitch_vport_event(struct notifier_block *nb,
                               unsigned long type, void *data)
{
    struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
    struct mlx5_eqe *eqe = data;
    struct mlx5_vport *vport;
    u16 vport_num;

    vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
    vport = &esw->vports[vport_num];
    if (vport->enabled)
        queue_work(esw->work_queue, &vport->vport_change_handler);

    return NOTIFY_OK;
}

/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))

int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
    int err;
@ -1641,6 +1656,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
    for (i = 0; i <= nvfs; i++)
        esw_enable_vport(esw, i, enabled_events);

    if (mode == SRIOV_LEGACY) {
        MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
        mlx5_eq_notifier_register(esw->dev, &esw->nb);
    }

    esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
             esw->enabled_vports);
    return 0;
@ -1670,6 +1690,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
    mc_promisc = &esw->mc_promisc;
    nvports = esw->enabled_vports;

    if (esw->mode == SRIOV_LEGACY)
        mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

    for (i = 0; i < esw->total_vports; i++)
        esw_disable_vport(esw, i);

@ -1778,23 +1801,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
    kfree(esw);
}

void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
{
    struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
    u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
    struct mlx5_vport *vport;

    if (!esw) {
        pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
                vport_num);
        return;
    }

    vport = &esw->vports[vport_num];
    if (vport->enabled)
        queue_work(esw->work_queue, &vport->vport_change_handler);
}

/* Vport Administration */
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)

@ -181,6 +181,7 @@ struct esw_mc_addr { /* SRIOV only */

struct mlx5_eswitch {
    struct mlx5_core_dev *dev;
    struct mlx5_nb nb;
    struct mlx5_eswitch_fdb fdb_table;
    struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
    struct workqueue_struct *work_queue;
@ -211,7 +212,6 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
@ -352,7 +352,6 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {}
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}

298	drivers/net/ethernet/mellanox/mlx5/core/events.c	Normal file
@ -0,0 +1,298 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2018 Mellanox Technologies

#include <linux/mlx5/driver.h>

#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"

struct mlx5_event_nb {
    struct mlx5_nb nb;
    void *ctx;
};

/* General events handlers for the low level mlx5_core driver
 *
 * Other major feature specific events such as
 * clock/eswitch/fpga/FW trace and many others, are handled elsewhere, with
 * separate notifier callbacks, specifically by those mlx5 components.
 */
static int any_notifier(struct notifier_block *, unsigned long, void *);
static int temp_warn(struct notifier_block *, unsigned long, void *);
static int port_module(struct notifier_block *, unsigned long, void *);

/* handler which forwards the event to events->nh, driver notifiers */
static int forward_event(struct notifier_block *, unsigned long, void *);

static struct mlx5_nb events_nbs_ref[] = {
    /* Events to be processed by mlx5_core */
    {.nb.notifier_call = any_notifier,  .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY },
    {.nb.notifier_call = temp_warn,     .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT },
    {.nb.notifier_call = port_module,   .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT },

    /* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
    /* QP/WQ resource events to forward */
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_DCT_DRAINED },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_COMM_EST },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SQ_DRAINED },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_LAST_WQE },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_CATAS_ERROR },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG_FAILED },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_ACCESS_ERROR },
    /* SRQ events */
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_CATAS_ERROR },
    {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_RQ_LIMIT },
};

struct mlx5_events {
    struct mlx5_core_dev *dev;
    struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
    /* driver notifier chain */
    struct atomic_notifier_head nh;
    /* port module events stats */
    struct mlx5_pme_stats pme_stats;
};

static const char *eqe_type_str(u8 type)
{
    switch (type) {
    case MLX5_EVENT_TYPE_COMP:
        return "MLX5_EVENT_TYPE_COMP";
    case MLX5_EVENT_TYPE_PATH_MIG:
        return "MLX5_EVENT_TYPE_PATH_MIG";
    case MLX5_EVENT_TYPE_COMM_EST:
        return "MLX5_EVENT_TYPE_COMM_EST";
    case MLX5_EVENT_TYPE_SQ_DRAINED:
        return "MLX5_EVENT_TYPE_SQ_DRAINED";
    case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
        return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
    case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
        return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
    case MLX5_EVENT_TYPE_CQ_ERROR:
        return "MLX5_EVENT_TYPE_CQ_ERROR";
    case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
        return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
    case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
        return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
    case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
    case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
        return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
    case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
        return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
    case MLX5_EVENT_TYPE_INTERNAL_ERROR:
        return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
    case MLX5_EVENT_TYPE_PORT_CHANGE:
        return "MLX5_EVENT_TYPE_PORT_CHANGE";
    case MLX5_EVENT_TYPE_GPIO_EVENT:
        return "MLX5_EVENT_TYPE_GPIO_EVENT";
    case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
        return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
    case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
        return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
    case MLX5_EVENT_TYPE_REMOTE_CONFIG:
        return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
    case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
        return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
    case MLX5_EVENT_TYPE_STALL_EVENT:
        return "MLX5_EVENT_TYPE_STALL_EVENT";
    case MLX5_EVENT_TYPE_CMD:
        return "MLX5_EVENT_TYPE_CMD";
    case MLX5_EVENT_TYPE_PAGE_REQUEST:
        return "MLX5_EVENT_TYPE_PAGE_REQUEST";
    case MLX5_EVENT_TYPE_PAGE_FAULT:
        return "MLX5_EVENT_TYPE_PAGE_FAULT";
    case MLX5_EVENT_TYPE_PPS_EVENT:
        return "MLX5_EVENT_TYPE_PPS_EVENT";
    case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
        return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
    case MLX5_EVENT_TYPE_FPGA_ERROR:
        return "MLX5_EVENT_TYPE_FPGA_ERROR";
    case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
        return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
    case MLX5_EVENT_TYPE_GENERAL_EVENT:
        return "MLX5_EVENT_TYPE_GENERAL_EVENT";
    case MLX5_EVENT_TYPE_DEVICE_TRACER:
        return "MLX5_EVENT_TYPE_DEVICE_TRACER";
    default:
        return "Unrecognized event";
    }
}

/* handles all FW events, type == eqe->type */
static int any_notifier(struct notifier_block *nb,
                        unsigned long type, void *data)
{
    struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
    struct mlx5_events *events = event_nb->ctx;
    struct mlx5_eqe *eqe = data;

    mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d)\n",
                  eqe_type_str(eqe->type), eqe->sub_type);
    return NOTIFY_OK;
}

/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */
static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
{
    struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
    struct mlx5_events *events = event_nb->ctx;
    struct mlx5_eqe *eqe = data;
    u64 value_lsb;
    u64 value_msb;

    value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
    value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);

    mlx5_core_warn(events->dev,
                   "High temperature on sensors with bit set %llx %llx",
                   value_msb, value_lsb);

    return NOTIFY_OK;
}

/* MLX5_EVENT_TYPE_PORT_MODULE_EVENT */
static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = {
    "Cable plugged",   /* MLX5_MODULE_STATUS_PLUGGED   = 0x1 */
    "Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED = 0x2 */
    "Cable error",     /* MLX5_MODULE_STATUS_ERROR     = 0x3 */
};

static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = {
    "Power budget exceeded",
    "Long Range for non MLNX cable",
    "Bus stuck(I2C or data shorted)",
    "No EEPROM/retry timeout",
    "Enforce part number list",
    "Unknown identifier",
    "High Temperature",
    "Bad or shorted cable/module",
    "Unknown status",
};

/* type == MLX5_EVENT_TYPE_PORT_MODULE_EVENT */
static int port_module(struct notifier_block *nb, unsigned long type, void *data)
{
    struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
    struct mlx5_events *events = event_nb->ctx;
    struct mlx5_eqe *eqe = data;

    enum port_module_event_status_type module_status;
    enum port_module_event_error_type error_type;
    struct mlx5_eqe_port_module *module_event_eqe;
    u8 module_num;

    module_event_eqe = &eqe->data.port_module;
    module_num = module_event_eqe->module;
    module_status = module_event_eqe->module_status &
                    PORT_MODULE_EVENT_MODULE_STATUS_MASK;
    error_type = module_event_eqe->error_type &
                 PORT_MODULE_EVENT_ERROR_TYPE_MASK;
    if (module_status < MLX5_MODULE_STATUS_ERROR) {
        events->pme_stats.status_counters[module_status - 1]++;
    } else if (module_status == MLX5_MODULE_STATUS_ERROR) {
        if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN)
            /* Unknown error type */
            error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN;
        events->pme_stats.error_counters[error_type]++;
    }

    if (!printk_ratelimit())
        return NOTIFY_OK;

    if (module_status < MLX5_MODULE_STATUS_ERROR)
        mlx5_core_info(events->dev,
                       "Port module event: module %u, %s\n",
                       module_num, mlx5_pme_status[module_status - 1]);

    else if (module_status == MLX5_MODULE_STATUS_ERROR)
        mlx5_core_info(events->dev,
                       "Port module event[error]: module %u, %s, %s\n",
                       module_num, mlx5_pme_status[module_status - 1],
                       mlx5_pme_error[error_type]);

    return NOTIFY_OK;
}

void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats)
{
    *stats = dev->priv.events->pme_stats;
}

/* forward event as is to registered interfaces (mlx5e/mlx5_ib) */
static int forward_event(struct notifier_block *nb, unsigned long event, void *data)
{
    struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
    struct mlx5_events *events = event_nb->ctx;
    struct mlx5_eqe *eqe = data;

    mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d) forward to interfaces\n",
                  eqe_type_str(eqe->type), eqe->sub_type);
    atomic_notifier_call_chain(&events->nh, event, data);
    return NOTIFY_OK;
}

int mlx5_events_init(struct mlx5_core_dev *dev)
{
    struct mlx5_events *events = kzalloc(sizeof(*events), GFP_KERNEL);

    if (!events)
        return -ENOMEM;

    ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
    events->dev = dev;
    dev->priv.events = events;
    return 0;
}

void mlx5_events_cleanup(struct mlx5_core_dev *dev)
{
    kvfree(dev->priv.events);
}

void mlx5_events_start(struct mlx5_core_dev *dev)
{
    struct mlx5_events *events = dev->priv.events;
    int i;

    for (i = 0; i < ARRAY_SIZE(events_nbs_ref); i++) {
        events->notifiers[i].nb = events_nbs_ref[i];
        events->notifiers[i].ctx = events;
        mlx5_eq_notifier_register(dev, &events->notifiers[i].nb);
    }
}

void mlx5_events_stop(struct mlx5_core_dev *dev)
{
    struct mlx5_events *events = dev->priv.events;
    int i;

    for (i = ARRAY_SIZE(events_nbs_ref) - 1; i >= 0; i--)
        mlx5_eq_notifier_unregister(dev, &events->notifiers[i].nb);
}

int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
    struct mlx5_events *events = dev->priv.events;

    return atomic_notifier_chain_register(&events->nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_register);

int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
    struct mlx5_events *events = dev->priv.events;

    return atomic_notifier_chain_unregister(&events->nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_unregister);

int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data)
{
    return atomic_notifier_call_chain(&events->nh, event, data);
}
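
mlx5_events_start()/mlx5_events_stop() above are table-driven: a static template array is copied per device, registered in order, and unregistered in reverse so the catch-all debug notifier is first in and last out. The shape of that pattern, reduced to a sketch with hypothetical "my_*" names:

static struct mlx5_nb my_nbs_ref[] = {
    { .nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY },
    { .nb.notifier_call = temp_warn,    .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT },
};

static void my_events_start(struct mlx5_core_dev *dev, struct mlx5_nb *nbs)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(my_nbs_ref); i++) {
        nbs[i] = my_nbs_ref[i]; /* per-device copy of the template */
        mlx5_eq_notifier_register(dev, &nbs[i]);
    }
}

static void my_events_stop(struct mlx5_core_dev *dev, struct mlx5_nb *nbs)
{
    int i;

    /* tear down in reverse registration order */
    for (i = ARRAY_SIZE(my_nbs_ref) - 1; i >= 0; i--)
        mlx5_eq_notifier_unregister(dev, &nbs[i]);
}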
@ -36,6 +36,7 @@

#include "mlx5_core.h"
#include "lib/mlx5.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "fpga/conn.h"

@ -145,6 +146,22 @@ static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
    return 0;
}

static int mlx5_fpga_event(struct mlx5_fpga_device *, unsigned long, void *);

static int fpga_err_event(struct notifier_block *nb, unsigned long event, void *eqe)
{
    struct mlx5_fpga_device *fdev = mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_err_nb);

    return mlx5_fpga_event(fdev, event, eqe);
}

static int fpga_qp_err_event(struct notifier_block *nb, unsigned long event, void *eqe)
{
    struct mlx5_fpga_device *fdev = mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_qp_err_nb);

    return mlx5_fpga_event(fdev, event, eqe);
}

int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
{
    struct mlx5_fpga_device *fdev = mdev->fpga;
@ -185,6 +202,11 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
    if (err)
        goto out;

    MLX5_NB_INIT(&fdev->fpga_err_nb, fpga_err_event, FPGA_ERROR);
    MLX5_NB_INIT(&fdev->fpga_qp_err_nb, fpga_qp_err_event, FPGA_QP_ERROR);
    mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_err_nb);
    mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_qp_err_nb);

    err = mlx5_fpga_conn_device_init(fdev);
    if (err)
        goto err_rsvd_gid;
@ -201,6 +223,8 @@ err_conn_init:
    mlx5_fpga_conn_device_cleanup(fdev);

err_rsvd_gid:
    mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
    mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);
    mlx5_core_unreserve_gids(mdev, max_num_qps);
out:
    spin_lock_irqsave(&fdev->state_lock, flags);
@ -256,6 +280,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
    }

    mlx5_fpga_conn_device_cleanup(fdev);
    mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
    mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);

    max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
    mlx5_core_unreserve_gids(mdev, max_num_qps);
}
@ -283,9 +310,10 @@ static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome)
    return "Unknown";
}

void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
static int mlx5_fpga_event(struct mlx5_fpga_device *fdev,
                           unsigned long event, void *eqe)
{
    struct mlx5_fpga_device *fdev = mdev->fpga;
    void *data = ((struct mlx5_eqe *)eqe)->data.raw;
    const char *event_name;
    bool teardown = false;
    unsigned long flags;
@ -303,9 +331,7 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
        fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn);
        break;
    default:
        mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
                                   event);
        return;
        return NOTIFY_DONE;
    }

    spin_lock_irqsave(&fdev->state_lock, flags);
@ -326,4 +352,6 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
     */
    if (teardown)
        mlx5_trigger_health_work(fdev->mdev);

    return NOTIFY_OK;
}
@ -35,11 +35,16 @@

#ifdef CONFIG_MLX5_FPGA

#include <linux/mlx5/eq.h>

#include "lib/eq.h"
#include "fpga/cmd.h"

/* Represents an Innova device */
struct mlx5_fpga_device {
	struct mlx5_core_dev *mdev;
	struct mlx5_nb fpga_err_nb;
	struct mlx5_nb fpga_qp_err_nb;
	spinlock_t state_lock; /* Protects state transitions */
	enum mlx5_fpga_status state;
	enum mlx5_fpga_image last_admin_image;
@ -82,7 +87,6 @@ int mlx5_fpga_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev);
void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data);

#else

@ -104,11 +108,6 @@ static inline void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
{
}

static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event,
				   void *data)
{
}

#endif

#endif /* __MLX5_FPGA_CORE_H__ */

@ -39,6 +39,7 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"

enum {
	MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
@ -79,29 +80,6 @@ void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
		    &dev->iseg->cmdq_addr_l_sz);
}

static void trigger_cmd_completions(struct mlx5_core_dev *dev)
{
	unsigned long flags;
	u64 vector;

	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	if (!vector)
		goto no_trig;

	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

static int in_fatal(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
@ -125,10 +103,10 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
	mlx5_core_err(dev, "start\n");
	if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		trigger_cmd_completions(dev);
		mlx5_cmd_trigger_completions(dev);
	}

	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
	mlx5_core_err(dev, "end\n");

unlock:

@ -33,6 +33,7 @@
#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
#include "clock.h"

@ -439,16 +440,17 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

void mlx5_pps_event(struct mlx5_core_dev *mdev,
		    struct mlx5_eqe *eqe)
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct mlx5_core_dev *mdev = clock->mdev;
	struct ptp_clock_event ptp_event;
	struct timespec64 ts;
	u64 nsec_now, nsec_delta;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta, ns;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	s64 ns;
	struct timespec64 ts;
	unsigned long flags;

	switch (clock->ptp_info.pin_config[pin].func) {
@ -463,6 +465,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
@ -481,8 +484,11 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
		write_sequnlock_irqrestore(&clock->lock, flags);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled event\n");
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}

void mlx5_init_clock(struct mlx5_core_dev *mdev)
@ -567,6 +573,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}

void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
@ -576,6 +585,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;

@ -36,7 +36,6 @@
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);

static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
@ -60,8 +59,6 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
#else
static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) {}

static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
	return -1;

@ -4,6 +4,8 @@
#ifndef __LIB_MLX5_EQ_H__
#define __LIB_MLX5_EQ_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cq.h>

#define MLX5_MAX_IRQ_NAME (32)
#define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))
@ -90,4 +92,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);

#endif

@ -33,6 +33,8 @@
#ifndef __LIB_MLX5_H__
#define __LIB_MLX5_H__

#include "mlx5_core.h"

void mlx5_init_reserved_gids(struct mlx5_core_dev *dev);
void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev);
int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count);
@ -40,4 +42,37 @@ void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count);
int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index);
void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index);

/* TODO move to lib/events.h */

#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF

enum port_module_event_status_type {
	MLX5_MODULE_STATUS_PLUGGED = 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
	MLX5_MODULE_STATUS_ERROR = 0x3,
	MLX5_MODULE_STATUS_NUM = 0x3,
};

enum port_module_event_error_type {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
	MLX5_MODULE_EVENT_ERROR_NUM,
};

struct mlx5_pme_stats {
	u64 status_counters[MLX5_MODULE_STATUS_NUM];
	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};

void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);

#endif
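
The port-module masks, status/error enums and mlx5_pme_stats now live in this driver-internal header; a stats consumer (the ethtool counters path in mlx5e is the in-tree user) would read them roughly like this (sketch; the local names are assumptions):

	struct mlx5_pme_stats pme;
	int i;

	mlx5_get_pme_stats(mdev, &pme);
	for (i = 0; i < MLX5_MODULE_STATUS_NUM; i++)
		pr_info("module status 0x%x seen %llu times\n",
			i + 1, pme.status_counters[i]);
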
@ -43,7 +43,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
@ -735,15 +734,19 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
		goto out;
	}

	err = mlx5_cq_debugfs_init(dev);
	err = mlx5_events_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
		dev_err(&pdev->dev, "failed to initialize events\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);
	err = mlx5_cq_debugfs_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
		goto err_events_cleanup;
	}

	mlx5_init_srq_table(dev);
	mlx5_init_qp_table(dev);

	mlx5_init_mkey_table(dev);

@ -798,10 +801,10 @@ err_rl_cleanup:
err_tables_cleanup:
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cq_debugfs_cleanup(dev);

err_events_cleanup:
	mlx5_events_cleanup(dev);
err_eq_cleanup:
	mlx5_eq_table_cleanup(dev);

@ -821,9 +824,9 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_events_cleanup(dev);
	mlx5_eq_table_cleanup(dev);
}

@ -916,16 +919,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
		goto reclaim_boot_pages;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev, sw_owner_id);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);
@ -953,6 +950,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
		goto err_get_uars;
	}

	mlx5_events_start(dev);
	mlx5_pagealloc_start(dev);

	err = mlx5_eq_table_create(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to create EQs\n");
@ -1039,6 +1039,8 @@ err_fw_tracer:
	mlx5_eq_table_destroy(dev);

err_eq_table:
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, priv->uar);

err_get_uars:
@ -1052,9 +1054,6 @@ err_stop_poll:
	goto out_err;
}

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

@ -1100,16 +1099,18 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
	mlx5_fpga_device_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, priv->uar);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev, cleanup);

	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_cleanup(dev);
@ -1119,12 +1120,6 @@ out:
	return err;
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};

static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@ -1158,7 +1153,6 @@ static int init_one(struct pci_dev *pdev,
	pci_set_drvdata(pdev, dev);

	dev->pdev = pdev;
	dev->event = mlx5_core_event;
	dev->profile = &profile[prof_sel];

	INIT_LIST_HEAD(&priv->ctx_list);
@ -1166,9 +1160,6 @@ static int init_one(struct pci_dev *pdev,
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);

	INIT_LIST_HEAD(&priv->waiting_events_list);
	priv->is_accum_events = false;

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
@ -1186,12 +1177,14 @@ static int init_one(struct pci_dev *pdev,
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);
	err = mlx5_pagealloc_init(dev);
	if (err)
		goto err_pagealloc_init;

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
		goto clean_health;
		goto err_load_one;
	}

	request_module_nowait(MLX5_IB_MOD);
@ -1205,8 +1198,9 @@ static int init_one(struct pci_dev *pdev,

clean_load:
	mlx5_unload_one(dev, priv, true);
clean_health:
err_load_one:
	mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);

@ -78,6 +78,11 @@ do { \
			 __func__, __LINE__, current->pid, \
			 ##__VA_ARGS__)

#define mlx5_core_warn_once(__dev, format, ...) \
	dev_warn_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
		      __func__, __LINE__, current->pid, \
		      ##__VA_ARGS__)

#define mlx5_core_info(__dev, format, ...) \
	dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)

@ -97,10 +102,6 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
@ -122,7 +123,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);

void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);

@ -136,6 +137,11 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);

int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
void mlx5_events_start(struct mlx5_core_dev *dev);
void mlx5_events_stop(struct mlx5_core_dev *dev);

void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_attach_device(struct mlx5_core_dev *dev);

@ -37,6 +37,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"

enum {
	MLX5_PAGES_CANT_GIVE = 0,
@ -433,15 +434,28 @@ static void pages_work_handler(struct work_struct *work)
	kfree(req);
}

void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev = container_of(priv, struct mlx5_core_dev, priv);
	eqe = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages = be32_to_cpu(eqe->data.req_pages.num_pages);
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
		      func_id, npages);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
		return NOTIFY_DONE;
	}

	req->dev = dev;
@ -449,6 +463,7 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
@ -524,19 +539,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
	return 0;
}

void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;
@ -544,11 +550,23 @@
	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);

@ -915,63 +915,6 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
	*enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
}

static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = {
	"Cable plugged", /* MLX5_MODULE_STATUS_PLUGGED = 0x1 */
	"Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED = 0x2 */
	"Cable error", /* MLX5_MODULE_STATUS_ERROR = 0x3 */
};

static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = {
	"Power budget exceeded",
	"Long Range for non MLNX cable",
	"Bus stuck(I2C or data shorted)",
	"No EEPROM/retry timeout",
	"Enforce part number list",
	"Unknown identifier",
	"High Temperature",
	"Bad or shorted cable/module",
	"Unknown status",
};

void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	enum port_module_event_status_type module_status;
	enum port_module_event_error_type error_type;
	struct mlx5_eqe_port_module *module_event_eqe;
	struct mlx5_priv *priv = &dev->priv;
	u8 module_num;

	module_event_eqe = &eqe->data.port_module;
	module_num = module_event_eqe->module;
	module_status = module_event_eqe->module_status &
			PORT_MODULE_EVENT_MODULE_STATUS_MASK;
	error_type = module_event_eqe->error_type &
		     PORT_MODULE_EVENT_ERROR_TYPE_MASK;

	if (module_status < MLX5_MODULE_STATUS_ERROR) {
		priv->pme_stats.status_counters[module_status - 1]++;
	} else if (module_status == MLX5_MODULE_STATUS_ERROR) {
		if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN)
			/* Unknown error type */
			error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN;
		priv->pme_stats.error_counters[error_type]++;
	}

	if (!printk_ratelimit())
		return;

	if (module_status < MLX5_MODULE_STATUS_ERROR)
		mlx5_core_info(dev,
			       "Port module event: module %u, %s\n",
			       module_num, mlx5_pme_status[module_status - 1]);

	else if (module_status == MLX5_MODULE_STATUS_ERROR)
		mlx5_core_info(dev,
			       "Port module event[error]: module %u, %s, %s\n",
			       module_num, mlx5_pme_status[module_status - 1],
			       mlx5_pme_error[error_type]);
}

int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

@ -38,11 +38,11 @@
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"
#include "lib/eq.h"

static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
						 u32 rsn)
static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_core_rsc_common *common;

	spin_lock(&table->lock);
@ -53,11 +53,6 @@ static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,

	spin_unlock(&table->lock);

	if (!common) {
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
			       rsn);
		return NULL;
	}
	return common;
}

@ -120,14 +115,52 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
	}
}

void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
static int rsc_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
	struct mlx5_core_rsc_common *common;
	struct mlx5_qp_table *table;
	struct mlx5_core_dev *dev;
	struct mlx5_core_dct *dct;
	u8 event_type = (u8)type;
	struct mlx5_core_qp *qp;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	u32 rsn;

	if (!common)
		return;
	switch (event_type) {
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
		break;
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
		break;
	default:
		return NOTIFY_DONE;
	}

	table = container_of(nb, struct mlx5_qp_table, nb);
	priv = container_of(table, struct mlx5_priv, qp_table);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);

	common = mlx5_get_rsc(table, rsn);
	if (!common) {
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
		return NOTIFY_OK;
	}

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
@ -152,6 +185,8 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
	}
out:
	mlx5_core_put_rsc(common);

	return NOTIFY_OK;
}

static int create_resource_common(struct mlx5_core_dev *dev,
@ -487,10 +522,16 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev)
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);

	table->nb.notifier_call = rsc_event_notifier;
	mlx5_notifier_register(dev, &table->nb);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	mlx5_notifier_unregister(dev, &table->nb);
	mlx5_qp_debugfs_cleanup(dev);
}

@ -676,8 +717,9 @@ struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
						enum mlx5_res_type res_type)
{
	u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	return mlx5_get_rsc(dev, rsn);
	return mlx5_get_rsc(table, rsn);
}
EXPORT_SYMBOL_GPL(mlx5_core_res_hold);

|
@ -258,115 +258,6 @@ void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_destroy_tis);
|
||||
|
||||
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
|
||||
u32 *rmpn)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
|
||||
int err;
|
||||
|
||||
MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
|
||||
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
|
||||
if (!err)
|
||||
*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
|
||||
|
||||
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
|
||||
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0};
|
||||
u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
|
||||
|
||||
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
|
||||
MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
|
||||
return mlx5_cmd_exec(dev, in, sizeof(in), out,
|
||||
sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
|
||||
int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
|
||||
|
||||
MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
|
||||
MLX5_SET(query_rmp_in, in, rmpn, rmpn);
|
||||
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
|
||||
}
|
||||
|
||||
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
|
||||
{
|
||||
void *in;
|
||||
void *rmpc;
|
||||
void *wq;
|
||||
void *bitmask;
|
||||
int err;
|
||||
|
||||
in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
|
||||
if (!in)
|
||||
return -ENOMEM;
|
||||
|
||||
rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
|
||||
bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
|
||||
wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
|
||||
|
||||
MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
|
||||
MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
|
||||
MLX5_SET(wq, wq, lwm, lwm);
|
||||
MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
|
||||
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
|
||||
|
||||
err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
|
||||
|
||||
kvfree(in);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
|
||||
u32 *xsrqn)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
|
||||
int err;
|
||||
|
||||
MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
|
||||
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
|
||||
if (!err)
|
||||
*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
|
||||
u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
|
||||
|
||||
MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
|
||||
MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
|
||||
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
|
||||
u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
|
||||
|
||||
MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
|
||||
MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
|
||||
MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
|
||||
MLX5_SET(arm_xrc_srq_in, in, op_mod,
|
||||
MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
|
||||
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
|
||||
u32 *rqtn)
|
||||
{
|
||||
|
@ -301,9 +301,15 @@
	MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};

/* mlx5 components can subscribe to any one of these events via
 * mlx5_eq_notifier_register API.
 */
enum mlx5_event {
	/* Special value to subscribe to any event */
	MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,

	/* HW events enum start: comp events are not subscribable */
	MLX5_EVENT_TYPE_COMP = 0x0,

	/* HW Async events enum start: subscribable events */
	MLX5_EVENT_TYPE_PATH_MIG = 0x01,
	MLX5_EVENT_TYPE_COMM_EST = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
@ -341,6 +347,8 @@ enum mlx5_event {
	MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,

	MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,

	MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1,
};

enum {

@ -46,10 +46,11 @@
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>

@ -193,16 +194,7 @@ struct mlx5_rsc_debug {
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
	MLX5_DEV_EVENT_PPS,
	MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
};

enum mlx5_port_status {
@ -277,6 +269,8 @@ struct mlx5_cmd_stats {
};

struct mlx5_cmd {
	struct mlx5_nb nb;

	void *cmd_alloc_buf;
	dma_addr_t alloc_dma;
	int alloc_size;
@ -398,20 +392,6 @@ struct mlx5_core_rsc_common {
	struct completion free;
};

struct mlx5_core_srq {
	struct mlx5_core_rsc_common common; /* must be first */
	u32 srqn;
	int max;
	size_t max_gs;
	size_t max_avail_gather;
	int wqe_shift;
	void (*event) (struct mlx5_core_srq *, enum mlx5_event);

	atomic_t refcount;
	struct completion free;
	u16 uid;
};

struct mlx5_uars_page {
	void __iomem *map;
	bool wc;
@ -461,13 +441,8 @@ struct mlx5_core_health {
};

struct mlx5_qp_table {
	/* protect radix tree
	 */
	spinlock_t lock;
	struct radix_tree_root tree;
};
	struct notifier_block nb;

struct mlx5_srq_table {
	/* protect radix tree
	 */
	spinlock_t lock;
@ -507,6 +482,7 @@ struct mlx5_fc_stats {
	unsigned long sampling_interval; /* jiffies */
};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
@ -533,36 +509,12 @@ struct mlx5_rl_table {
	struct mlx5_rl_entry *rl_entry;
};

enum port_module_event_status_type {
	MLX5_MODULE_STATUS_PLUGGED = 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
	MLX5_MODULE_STATUS_ERROR = 0x3,
	MLX5_MODULE_STATUS_NUM = 0x3,
};

enum port_module_event_error_type {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
	MLX5_MODULE_EVENT_ERROR_NUM,
};

struct mlx5_port_module_event_stats {
	u64 status_counters[MLX5_MODULE_STATUS_NUM];
	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};

struct mlx5_priv {
	char name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table *eq_table;

	/* pages stuff */
	struct mlx5_nb pg_nb;
	struct workqueue_struct *pg_wq;
	struct rb_root page_root;
	int fw_pages;
@ -572,8 +524,6 @@ struct mlx5_priv {

	struct mlx5_core_health health;

	struct mlx5_srq_table srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table qp_table;
	struct dentry *qp_debugfs;
@ -603,9 +553,7 @@ struct mlx5_priv {
	struct list_head dev_list;
	struct list_head ctx_list;
	spinlock_t ctx_lock;

	struct list_head waiting_events_list;
	bool is_accum_events;
	struct mlx5_events *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs *mpfs;
@ -616,8 +564,6 @@ struct mlx5_priv {
	struct mlx5_fc_stats fc_stats;
	struct mlx5_rl_table rl_table;

	struct mlx5_port_module_event_stats pme_stats;

	struct mlx5_bfreg_data bfregs;
	struct mlx5_uars_page *uar;
};
@ -671,6 +617,8 @@ struct mlx5_pps {
};

struct mlx5_clock {
	struct mlx5_core_dev *mdev;
	struct mlx5_nb pps_nb;
	seqlock_t lock;
	struct cyclecounter cycles;
	struct timecounter tc;
@ -678,7 +626,6 @@ struct mlx5_clock {
	u32 nominal_c_mult;
	unsigned long overflow_period;
	struct delayed_work overflow_work;
	struct mlx5_core_dev *mdev;
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
	struct mlx5_pps pps_info;
@ -711,9 +658,6 @@ struct mlx5_core_dev {
	/* sync interface state */
	struct mutex intf_state_mutex;
	unsigned long intf_state;
	void (*event) (struct mlx5_core_dev *dev,
		       enum mlx5_dev_event event,
		       unsigned long param);
	struct mlx5_priv priv;
	struct mlx5_profile *profile;
	atomic_t num_qps;
@ -935,13 +879,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
@ -960,9 +897,9 @@ int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
@ -973,9 +910,6 @@ void mlx5_unregister_debugfs(void);

void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@ -1070,8 +1004,6 @@ struct mlx5_interface {
	void (*remove)(struct mlx5_core_dev *dev, void *context);
	int (*attach)(struct mlx5_core_dev *dev, void *context);
	void (*detach)(struct mlx5_core_dev *dev, void *context);
	void (*event)(struct mlx5_core_dev *dev, void *context,
		      enum mlx5_dev_event event, unsigned long param);
	void * (*get_dev)(void *context);
	int protocol;
	struct list_head list;
@ -1080,6 +1012,9 @@ struct mlx5_interface {
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);

@ -4,8 +4,6 @@
#ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H

#include <linux/mlx5/driver.h>

enum {
	MLX5_EQ_PAGEREQ_IDX = 0,
	MLX5_EQ_CMD_IDX = 1,
@ -22,6 +20,7 @@ enum {
#define MLX5_NUM_SPARE_EQE (0x80)

struct mlx5_eq;
struct mlx5_core_dev;

struct mlx5_eq_param {
	u8 index;
@ -57,4 +56,17 @@ static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
	return cc;
}

struct mlx5_nb {
	struct notifier_block nb;
	u8 event_type;
};

#define mlx5_nb_cof(ptr, type, member) \
	(container_of(container_of(ptr, struct mlx5_nb, nb), type, member))

#define MLX5_NB_INIT(name, handler, event) do { \
	(name)->nb.notifier_call = handler; \
	(name)->event_type = MLX5_EVENT_TYPE_##event; \
} while (0)

#endif /* MLX5_CORE_EQ_H */
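
struct mlx5_nb bundles a notifier_block with the single EQE type its handler wants; MLX5_NB_INIT fills both fields and mlx5_nb_cof recovers the embedding object inside the handler, exactly as the clock and FPGA hunks above do. A sketch of the pattern with assumed my_* names (not part of this diff):

	struct my_ctx {
		struct mlx5_nb nb;
	};

	static int my_port_event(struct notifier_block *nb,
				 unsigned long type, void *data)
	{
		struct my_ctx *ctx = mlx5_nb_cof(nb, struct my_ctx, nb);

		/* only MLX5_EVENT_TYPE_PORT_CHANGE EQEs reach this handler;
		 * data points at the raw struct mlx5_eqe
		 */
		pr_debug("port event for ctx %p\n", ctx);
		return NOTIFY_OK;
	}

	static void my_start(struct mlx5_core_dev *dev, struct my_ctx *ctx)
	{
		MLX5_NB_INIT(&ctx->nb, my_port_event, PORT_CHANGE);
		mlx5_eq_notifier_register(dev, &ctx->nb);
	}
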
@ -144,6 +144,9 @@ enum {
	MLX5_CMD_OP_DESTROY_XRQ = 0x718,
	MLX5_CMD_OP_QUERY_XRQ = 0x719,
	MLX5_CMD_OP_ARM_XRQ = 0x71a,
	MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
	MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
	MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
	MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
	MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
	MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
@ -245,6 +248,7 @@ enum {
	MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
	MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
	MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
	MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT = 0x93f,
	MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
	MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
	MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
@ -260,6 +264,12 @@ enum {
	MLX5_CMD_OP_MAX
};

/* Valid range for general commands that don't work over an object */
enum {
	MLX5_CMD_OP_GENERAL_START = 0xb00,
	MLX5_CMD_OP_GENERAL_END = 0xd00,
};
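
Per the cover letter, firmware now enforces privilege per UID, so the driver only needs to recognize that an opcode falls in the general range rather than whitelisting each future command; the check reduces to a range test (sketch; the helper name follows the DEVX code but is illustrative here):

	static bool devx_is_general_cmd(u16 opcode)
	{
		return opcode >= MLX5_CMD_OP_GENERAL_START &&
		       opcode < MLX5_CMD_OP_GENERAL_END;
	}
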
struct mlx5_ifc_flow_table_fields_supported_bits {
	u8 outer_dmac[0x1];
	u8 outer_smac[0x1];
@ -883,6 +893,10 @@ enum {
	MLX5_CAP_UMR_FENCE_NONE = 0x2,
};

enum {
	MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
};

struct mlx5_ifc_cmd_hca_cap_bits {
	u8 reserved_at_0[0x30];
	u8 vhca_id[0x10];
@ -1193,7 +1207,13 @@ struct mlx5_ifc_cmd_hca_cap_bits {
	u8 num_vhca_ports[0x8];
	u8 reserved_at_618[0x6];
	u8 sw_owner_id[0x1];
	u8 reserved_at_61f[0x1e1];
	u8 reserved_at_61f[0x1];

	u8 reserved_at_620[0x80];

	u8 uctx_cap[0x20];

	u8 reserved_at_6c0[0x140];
};

enum mlx5_flow_destination_type {
@ -9276,7 +9296,9 @@ struct mlx5_ifc_umem_bits {
struct mlx5_ifc_uctx_bits {
	u8 modify_field_select[0x40];

	u8 reserved_at_40[0x1c0];
	u8 cap[0x20];

	u8 reserved_at_60[0x1a0];
};

struct mlx5_ifc_create_umem_in_bits {
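
uctx_cap in the HCA capabilities advertises what the device security model permits, while the new cap field in the uctx object carries what a DEVX context actually requests; context creation would mask one against the other, roughly (sketch; assumes the create_uctx_in layout and is not quoted from this series):

	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
	void *uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	u32 cap = 0;

	/* request only capabilities the firmware has offered */
	if (MLX5_CAP_GEN(dev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX)
		cap |= MLX5_UCTX_CAP_RAW_TX;
	MLX5_SET(uctx, uctx, cap, cap);
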
@ -107,9 +107,6 @@ enum mlx5e_connector_type {

#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)

#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF

int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
			 int ptys_size, int proto_mask, u8 local_port);

@ -1,72 +0,0 @@
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_SRQ_H
#define MLX5_SRQ_H

#include <linux/mlx5/driver.h>

enum {
	MLX5_SRQ_FLAG_ERR = (1 << 0),
	MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
	MLX5_SRQ_FLAG_RNDV = (1 << 2),
};

struct mlx5_srq_attr {
	u32 type;
	u32 flags;
	u32 log_size;
	u32 wqe_shift;
	u32 log_page_size;
	u32 wqe_cnt;
	u32 srqn;
	u32 xrcd;
	u32 page_offset;
	u32 cqn;
	u32 pd;
	u32 lwm;
	u32 user_index;
	u64 db_record;
	__be64 *pas;
	u32 tm_log_list_size;
	u32 tm_next_tag;
	u32 tm_hw_phase_cnt;
	u32 tm_sw_phase_cnt;
	u16 uid;
};

struct mlx5_core_dev;

void mlx5_init_srq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);

#endif /* MLX5_SRQ_H */

@ -58,17 +58,6 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
			 int inlen);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
			 u32 *rmpn);
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
			  u32 *rmpn);
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);

int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
			 u32 *rqtn);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,

@ -524,6 +524,12 @@ struct uapi_definition {
			.u2.objs_arr.max_len = _max_len, \
			__VA_ARGS__ } })

/*
 * Only for use with UVERBS_ATTR_IDR, allows any uobject type to be accepted,
 * the user must validate the type of the uobject instead.
 */
#define UVERBS_IDR_ANY_OBJECT 0xFFFF

#define UVERBS_ATTR_IDR(_attr_id, _idr_type, _access, ...) \
	(&(const struct uverbs_attr_def){ \
		.id = _attr_id, \
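
With UVERBS_IDR_ANY_OBJECT as the _idr_type, the core skips the IDR type check and hands the looked-up uobject straight to the driver, which is what lets DEVX modify/query accept any verbs object. A method spec in that style might read (sketch; the MLX5_IB_* names mirror the devx series but are illustrative):

	DECLARE_UVERBS_NAMED_METHOD(
		MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
		UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
				UVERBS_IDR_ANY_OBJECT,
				UVERBS_ACCESS_WRITE,
				UA_MANDATORY));
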
@ -182,5 +182,17 @@ static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
	uflow->resources = uflow_res;
}

struct uverbs_api_object {
	const struct uverbs_obj_type *type_attrs;
	const struct uverbs_obj_type_class *type_class;
	u8 disabled:1;
	u32 id;
};

static inline u32 uobj_get_object_id(struct ib_uobject *uobj)
{
	return uobj->uapi_object->id;
}

#endif
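
Because the core no longer checks the IDR type for such methods, the driver validates each uobject itself via the new helper (sketch; which object IDs a given command should accept is driver-specific and assumed here):

	static bool devx_is_valid_obj_id(struct ib_uobject *uobj)
	{
		/* e.g. accept only verbs CQs for this particular command */
		return uobj_get_object_id(uobj) == UVERBS_OBJECT_CQ;
	}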