Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-13 23:34:05 +08:00)
RDMA: Remove uverbs_ex_cmd_mask values that are linked to functions
For a while now the uverbs layer has checked whether the driver implements a function before allowing the ucmd to proceed. This largely obsoletes the cmd_mask stuff, but there are some tricky bits in drivers preventing it from being removed entirely. Remove the easy elements of uverbs_ex_cmd_mask by pre-setting them in the core code. These commands are triggered solely based on the related ops function pointer. query_device_ex is not triggered based on an op, but all drivers already implement something compatible with the extension, so enable it globally too.

Link: https://lore.kernel.org/r/2-v1-caa70ba3d1ab+1436e-ucmd_mask_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
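To make the mechanism concrete, here is a minimal, self-contained C sketch of the idea the message describes, using hypothetical names (dev_ops, build_ex_cmd_mask, the EX_CMD_* enum) rather than the kernel's real ib_device_ops or uverbs types: the framework derives which extended commands are permitted solely from which ops function pointers the driver filled in, so per-driver cmd_mask bookkeeping becomes redundant.

/* Minimal sketch, assuming a simplified model of the uverbs idea:
 * hypothetical types, not the in-kernel API. */
#include <stdint.h>
#include <stdio.h>

enum ex_cmd { EX_CMD_MODIFY_CQ, EX_CMD_CREATE_WQ, EX_CMD_QUERY_DEVICE };

struct dev_ops {
    int (*modify_cq)(void);     /* optional */
    int (*create_wq)(void);     /* optional */
    int (*query_device)(void);  /* optional */
};

/* Build the mask of allowed extended commands solely from which
 * ops pointers the driver filled in. */
static uint64_t build_ex_cmd_mask(const struct dev_ops *ops)
{
    uint64_t mask = 0;

    if (ops->modify_cq)
        mask |= 1ULL << EX_CMD_MODIFY_CQ;
    if (ops->create_wq)
        mask |= 1ULL << EX_CMD_CREATE_WQ;
    if (ops->query_device)
        mask |= 1ULL << EX_CMD_QUERY_DEVICE;
    return mask;
}

static int my_modify_cq(void) { return 0; }

int main(void)
{
    struct dev_ops ops = { .modify_cq = my_modify_cq }; /* driver implements one op */
    uint64_t mask = build_ex_cmd_mask(&ops);

    printf("modify_cq allowed: %d\n", !!(mask & (1ULL << EX_CMD_MODIFY_CQ)));
    printf("create_wq allowed: %d\n", !!(mask & (1ULL << EX_CMD_CREATE_WQ)));
    return 0;
}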
commit b8e3130dd9 (parent a5c29a262e)
@@ -600,6 +600,17 @@ struct ib_device *_ib_alloc_device(size_t size)
 	init_completion(&device->unreg_completion);
 	INIT_WORK(&device->unregistration_work, ib_unregister_work);
 
+	device->uverbs_ex_cmd_mask =
+		BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
+		BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+		BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+		BIT_ULL(IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
+		BIT_ULL(IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL) |
+		BIT_ULL(IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+		BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
+		BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+		BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
+
 	return device;
 }
 EXPORT_SYMBOL(_ib_alloc_device);
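A side note on the BIT_ULL() helper used in the new core code: in the kernel it expands to a 64-bit shift, effectively (1ULL << nr), which keeps command numbers of 32 and above well defined; the open-coded (1ull << ...) form in the driver hunks below does the same thing. A tiny stand-alone illustration, with BIT_ULL_DEMO as a hypothetical stand-in for the real macro:

/* Stand-alone illustration of why a 64-bit-wide shift is needed for
 * command numbers >= 32; BIT_ULL_DEMO is a hypothetical stand-in. */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL_DEMO(nr) (1ULL << (nr))

int main(void)
{
    /* 1 << 35 would shift a 32-bit int past its width (undefined behaviour);
     * the ULL form keeps the whole computation in 64 bits. */
    uint64_t ok = BIT_ULL_DEMO(35);

    printf("bit 35 set: 0x%llx\n", (unsigned long long)ok);
    return 0;
}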
@@ -3753,7 +3753,7 @@ const struct uapi_definition uverbs_def_write_intf[] = {
 			IB_USER_VERBS_EX_CMD_MODIFY_CQ,
 			ib_uverbs_ex_modify_cq,
 			UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
-			UAPI_DEF_METHOD_NEEDS_FN(create_cq))),
+			UAPI_DEF_METHOD_NEEDS_FN(modify_cq))),
 
 	DECLARE_UVERBS_OBJECT(
 		UVERBS_OBJECT_DEVICE,
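Besides relying on the pre-set mask, the hunk above fixes the dependency of the extended MODIFY_CQ write command, which was previously guarded by create_cq instead of modify_cq. The underlying technique, recording next to each method which ops pointer it needs and enabling the method only when the driver set that pointer, can be sketched in plain C with offsetof(); the names below (method_def, METHOD_NEEDS_FN, op_present) are hypothetical, not the kernel's UAPI_DEF_* machinery:

/* Sketch of a "method needs function" table under a simplified model:
 * each method records the offset of its required ops pointer, and the
 * core enables only the methods whose pointer the driver actually set. */
#include <stddef.h>
#include <stdio.h>

struct dev_ops {
    int (*create_cq)(void);
    int (*modify_cq)(void);
};

struct method_def {
    const char *name;
    size_t needed_fn_offset;   /* offset of the required op in struct dev_ops */
};

#define METHOD_NEEDS_FN(method_name, op) \
    { .name = method_name, .needed_fn_offset = offsetof(struct dev_ops, op) }

static const struct method_def methods[] = {
    METHOD_NEEDS_FN("EX_CMD_CREATE_CQ", create_cq),
    METHOD_NEEDS_FN("EX_CMD_MODIFY_CQ", modify_cq), /* depend on modify_cq, not create_cq */
};

static int op_present(const struct dev_ops *ops, size_t offset)
{
    /* Read the function pointer stored at the recorded offset. */
    int (*const *slot)(void) =
        (int (*const *)(void))((const char *)ops + offset);

    return *slot != NULL;
}

static int fake_create_cq(void) { return 0; }

int main(void)
{
    struct dev_ops ops = { .create_cq = fake_create_cq }; /* no modify_cq */
    size_t i;

    for (i = 0; i < sizeof(methods) / sizeof(methods[0]); i++)
        printf("%s enabled: %d\n", methods[i].name,
               op_present(&ops, methods[i].needed_fn_offset));
    return 0;
}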
@@ -326,9 +326,6 @@ static int efa_ib_device_add(struct efa_dev *dev)
 		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
 		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
 
-	dev->ibdev.uverbs_ex_cmd_mask =
-		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
-
 	ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
 
 	err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
@@ -2062,11 +2062,6 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
 		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
 }
 
-static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
-{
-	return -EOPNOTSUPP;
-}
-
 static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
 				     enum ib_cq_notify_flags flags)
 {
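The removed hns_roce_v1_modify_cq() existed only to return -EOPNOTSUPP. Once the caller refuses a command whose ops pointer is NULL, such stubs are dead weight. A minimal stand-alone sketch of that dispatch-time check, with hypothetical types rather than the kernel's ib_cq/ib_device_ops:

/* Sketch: the dispatcher checks the op pointer and returns EOPNOTSUPP
 * itself, so drivers no longer need "return -EOPNOTSUPP" stubs.
 * Hypothetical types, not the in-kernel code. */
#include <errno.h>
#include <stdio.h>

struct dev_ops {
    int (*modify_cq)(unsigned short cq_count, unsigned short cq_period);
};

static int dispatch_modify_cq(const struct dev_ops *ops,
                              unsigned short cq_count, unsigned short cq_period)
{
    if (!ops->modify_cq)
        return -EOPNOTSUPP;   /* core-level refusal, no driver stub needed */
    return ops->modify_cq(cq_count, cq_period);
}

int main(void)
{
    struct dev_ops ops = { 0 };   /* driver does not implement modify_cq */

    printf("dispatch result: %d (expected -EOPNOTSUPP)\n",
           dispatch_modify_cq(&ops, 1, 1));
    return 0;
}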
@@ -4347,7 +4342,6 @@ static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
 
 static const struct ib_device_ops hns_roce_v1_dev_ops = {
 	.destroy_qp = hns_roce_v1_destroy_qp,
-	.modify_cq = hns_roce_v1_modify_cq,
 	.poll_cq = hns_roce_v1_poll_cq,
 	.post_recv = hns_roce_v1_post_recv,
 	.post_send = hns_roce_v1_post_send,
@@ -4367,7 +4361,6 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
 	.set_mtu = hns_roce_v1_set_mtu,
 	.write_mtpt = hns_roce_v1_write_mtpt,
 	.write_cqc = hns_roce_v1_write_cqc,
-	.modify_cq = hns_roce_v1_modify_cq,
 	.clear_hem = hns_roce_v1_clear_hem,
 	.modify_qp = hns_roce_v1_modify_qp,
 	.query_qp = hns_roce_v1_query_qp,
@@ -507,8 +507,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
 		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
 
-	ib_dev->uverbs_ex_cmd_mask |= (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
-
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
 		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
 		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
@@ -2685,8 +2685,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
 	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
 	ibdev->ib_dev.uverbs_ex_cmd_mask |=
-		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
-		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
 
@@ -2694,15 +2692,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	    ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
 	      IB_LINK_LAYER_ETHERNET) ||
 	     (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
-	      IB_LINK_LAYER_ETHERNET))) {
-		ibdev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+	      IB_LINK_LAYER_ETHERNET)))
 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
-	}
 
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
 	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
@@ -2721,9 +2712,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
 	if (check_flow_steering_support(dev)) {
 		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
-		ibdev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
 		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
 	}
 
@@ -4166,14 +4166,10 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
-	dev->ib_dev.uverbs_ex_cmd_mask =
-		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
+	dev->ib_dev.uverbs_ex_cmd_mask |=
 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
-		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
-		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
-		(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
-		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
 
 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
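Note that the mlx5 hunk above also turns the remaining assignment into '|=': with _ib_alloc_device() now pre-seeding uverbs_ex_cmd_mask, a plain '=' in the driver would silently throw away the core-provided bits. A tiny stand-alone demonstration with arbitrary example bit values:

/* Sketch of why the driver must OR into a mask the core already seeded,
 * rather than assigning over it. The bit positions are arbitrary examples. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t core_preset = (1ULL << 10) | (1ULL << 12); /* seeded by core */
    const uint64_t driver_bits = (1ULL << 3) | (1ULL << 4);   /* driver-specific */

    uint64_t clobbered = core_preset;
    uint64_t combined  = core_preset;

    clobbered = driver_bits;    /* '=' : core bits are lost     */
    combined |= driver_bits;    /* '|=': core bits are preserved */

    printf("assign: 0x%llx\n", (unsigned long long)clobbered);
    printf("or-in : 0x%llx\n", (unsigned long long)combined);
    return 0;
}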
@@ -4276,12 +4272,6 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 
 	if (ll == IB_LINK_LAYER_ETHERNET) {
-		dev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
 
 		port_num = mlx5_core_native_port_num(dev->mdev) - 1;