IB/mlx5: Allocate resources just before first QP/SRQ is created

Previously, all IB device resources were initialized at driver load. As
they are not always used, move the initialization to the time when
they are needed.

To be more specific, move PD (p0) and CQ (c0) initialization to the
time when the first SRQ is created, and move SRQ (s0 and s1)
initialization to the time when the first QP is created. To avoid
concurrent creations, two new mutexes are also added.

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Link: https://lore.kernel.org/r/98c3e53a8cc0bdfeb6dec6e5bb8b037d78ab00d8.1717409369.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Jianbo Liu 2024-06-03 13:26:39 +03:00 committed by Leon Romanovsky
parent 638420115c
commit 5895e70f2e
4 changed files with 122 additions and 47 deletions
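
For readers unfamiliar with the pattern, the sketch below illustrates the double-checked lazy initialization this patch applies to p0/c0 and s0/s1: an unlocked fast-path check, then the mutex, then a re-check before allocating and publishing the resource. It is a minimal userspace analogue under assumed names, not the driver code: lazy_res and res_lock are made up, and malloc() merely stands in for ib_alloc_pd()/ib_create_cq(); the unlocked first read is the same simplification the patch itself uses for pointers that are set once and never cleared until unload.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *lazy_res;            /* stands in for devr->c0/p0: set once, freed only at "unload" */
static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for devr->cq_lock */

static int lazy_res_init(void)
{
	int ret = 0;

	/* Fast path: skip the mutex once initialization is already done. */
	if (lazy_res)
		return 0;

	pthread_mutex_lock(&res_lock);
	/* Re-check under the lock: a concurrent caller may have won the race. */
	if (lazy_res)
		goto unlock;

	lazy_res = malloc(64);    /* stands in for the real resource allocation */
	if (!lazy_res)
		ret = -1;

unlock:
	pthread_mutex_unlock(&res_lock);
	return ret;
}

int main(void)
{
	if (lazy_res_init() || lazy_res_init())  /* second call returns via the fast path */
		return 1;
	printf("resource ready at %p\n", lazy_res);
	free(lazy_res);
	return 0;
}

Two separate mutexes (cq_lock and srq_lock) keep the two paths independent: mlx5_ib_dev_res_srq_init() calls mlx5_ib_dev_res_cq_init() before creating s0 and s1, so a QP created before any SRQ still gets p0 and c0 set up.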


@@ -2823,37 +2823,72 @@ static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
}
}
static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
{
struct mlx5_ib_resources *devr = &dev->devr;
struct ib_cq_init_attr cq_attr = {.cqe = 1};
struct ib_device *ibdev;
struct ib_pd *pd;
struct ib_cq *cq;
int ret = 0;
/*
* devr->c0 is set once, never changed until device unload.
* Avoid taking the mutex if initialization is already done.
*/
if (devr->c0)
return 0;
mutex_lock(&devr->cq_lock);
if (devr->c0)
goto unlock;
ibdev = &dev->ib_dev;
pd = ib_alloc_pd(ibdev, 0);
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%d\n", ret);
goto unlock;
}
cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%d\n", ret);
ib_dealloc_pd(pd);
goto unlock;
}
devr->p0 = pd;
devr->c0 = cq;
unlock:
mutex_unlock(&devr->cq_lock);
return ret;
}
int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
{
struct mlx5_ib_resources *devr = &dev->devr;
struct ib_srq_init_attr attr;
struct ib_device *ibdev;
struct ib_cq_init_attr cq_attr = {.cqe = 1};
int port;
struct ib_srq *s0, *s1;
int ret = 0;
ibdev = &dev->ib_dev;
/*
* devr->s1 is set once, never changed until device unload.
* Avoid taking the mutex if initialization is already done.
*/
if (devr->s1)
return 0;
if (!MLX5_CAP_GEN(dev->mdev, xrc))
return -EOPNOTSUPP;
mutex_lock(&devr->srq_lock);
if (devr->s1)
goto unlock;
devr->p0 = ib_alloc_pd(ibdev, 0);
if (IS_ERR(devr->p0))
return PTR_ERR(devr->p0);
devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
if (IS_ERR(devr->c0)) {
ret = PTR_ERR(devr->c0);
goto error1;
}
ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
ret = mlx5_ib_dev_res_cq_init(dev);
if (ret)
goto error2;
ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
if (ret)
goto error3;
goto unlock;
memset(&attr, 0, sizeof(attr));
attr.attr.max_sge = 1;
@@ -2861,10 +2896,11 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
attr.srq_type = IB_SRQT_XRC;
attr.ext.cq = devr->c0;
devr->s0 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(devr->s0)) {
ret = PTR_ERR(devr->s0);
goto err_create;
s0 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(s0)) {
ret = PTR_ERR(s0);
mlx5_ib_err(dev, "Couldn't create SRQ 0 for res init, err=%d\n", ret);
goto unlock;
}
memset(&attr, 0, sizeof(attr));
@@ -2872,29 +2908,48 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
attr.attr.max_wr = 1;
attr.srq_type = IB_SRQT_BASIC;
devr->s1 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(devr->s1)) {
ret = PTR_ERR(devr->s1);
goto error6;
s1 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(s1)) {
ret = PTR_ERR(s1);
mlx5_ib_err(dev, "Couldn't create SRQ 1 for res init, err=%d\n", ret);
ib_destroy_srq(s0);
}
devr->s0 = s0;
devr->s1 = s1;
unlock:
mutex_unlock(&devr->srq_lock);
return ret;
}
static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
{
struct mlx5_ib_resources *devr = &dev->devr;
int port;
int ret;
if (!MLX5_CAP_GEN(dev->mdev, xrc))
return -EOPNOTSUPP;
ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
if (ret)
return ret;
ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
if (ret) {
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
return ret;
}
for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
INIT_WORK(&devr->ports[port].pkey_change_work,
pkey_change_handler);
return 0;
mutex_init(&devr->cq_lock);
mutex_init(&devr->srq_lock);
error6:
ib_destroy_srq(devr->s0);
err_create:
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
error3:
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
error2:
ib_destroy_cq(devr->c0);
error1:
ib_dealloc_pd(devr->p0);
return ret;
return 0;
}
static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
@@ -2911,12 +2966,20 @@ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
cancel_work_sync(&devr->ports[port].pkey_change_work);
ib_destroy_srq(devr->s1);
ib_destroy_srq(devr->s0);
/* After s0/s1 init, they are not unset during the device lifetime. */
if (devr->s1) {
ib_destroy_srq(devr->s1);
ib_destroy_srq(devr->s0);
}
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
ib_destroy_cq(devr->c0);
ib_dealloc_pd(devr->p0);
/* After p0/c0 init, they are not unset during the device lifetime. */
if (devr->c0) {
ib_destroy_cq(devr->c0);
ib_dealloc_pd(devr->p0);
}
mutex_destroy(&devr->cq_lock);
mutex_destroy(&devr->srq_lock);
}
static u32 get_core_cap_flags(struct ib_device *ibdev,


@@ -824,11 +824,13 @@ struct mlx5_ib_port_resources {
struct mlx5_ib_resources {
struct ib_cq *c0;
struct mutex cq_lock;
u32 xrcdn0;
u32 xrcdn1;
struct ib_pd *p0;
struct ib_srq *s0;
struct ib_srq *s1;
struct mutex srq_lock;
struct mlx5_ib_port_resources ports[2];
};
@@ -1272,6 +1274,8 @@ to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
struct mlx5_user_mmap_entry, rdma_entry);
}
int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);


@@ -3234,6 +3234,10 @@ int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
enum ib_qp_type type;
int err;
err = mlx5_ib_dev_res_srq_init(dev);
if (err)
return err;
err = check_qp_type(dev, attr, &type);
if (err)
return err;


@@ -213,6 +213,10 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
return -EINVAL;
}
err = mlx5_ib_dev_res_cq_init(dev);
if (err)
return err;
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);