mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-18 17:54:13 +08:00
mlx4_core: Add QP range reservation support
To allow allocating an aligned range of consecutive QP numbers, add an interface to reserve an aligned range of QP numbers and have the QP allocation function always take a QP number. This will be used for RSS support in the mlx4_en Ethernet driver and also potentially by IPoIB RSS support. Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il> Signed-off-by: Roland Dreier <rolandd@cisco.com>
This commit is contained in:
parent
f6bccf6954
commit
a3cdcbfa8f
@ -451,6 +451,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
|
||||
struct ib_qp_init_attr *init_attr,
|
||||
struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
|
||||
{
|
||||
int qpn;
|
||||
int err;
|
||||
|
||||
mutex_init(&qp->mutex);
|
||||
@ -545,9 +546,17 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
|
||||
}
|
||||
}
|
||||
|
||||
err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
|
||||
if (sqpn) {
|
||||
qpn = sqpn;
|
||||
} else {
|
||||
err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
|
||||
if (err)
|
||||
goto err_wrid;
|
||||
}
|
||||
|
||||
err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
|
||||
if (err)
|
||||
goto err_wrid;
|
||||
goto err_qpn;
|
||||
|
||||
/*
|
||||
* Hardware wants QPN written in big-endian order (after
|
||||
@ -560,6 +569,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
|
||||
|
||||
return 0;
|
||||
|
||||
err_qpn:
|
||||
if (!sqpn)
|
||||
mlx4_qp_release_range(dev->dev, qpn, 1);
|
||||
|
||||
err_wrid:
|
||||
if (pd->uobject) {
|
||||
if (!init_attr->srq)
|
||||
@ -655,6 +668,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
|
||||
mlx4_ib_unlock_cqs(send_cq, recv_cq);
|
||||
|
||||
mlx4_qp_free(dev->dev, &qp->mqp);
|
||||
|
||||
if (!is_sqp(dev, qp))
|
||||
mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
|
||||
|
||||
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
|
||||
|
||||
if (is_user) {
|
||||
|
@ -65,10 +65,82 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
|
||||
|
||||
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
	/* Freeing a single object is just the cnt == 1 case of a range free. */
	mlx4_bitmap_free_range(bitmap, obj, 1);
}
|
||||
|
||||
/*
 * Scan @bitmap (of @nbits bits) starting at @start for a run of @len
 * consecutive clear bits whose first bit is @align-aligned.  Returns the
 * index of the first bit of the run, or (unsigned long)-1 if no such run
 * exists at or after @start.  Caller must hold the bitmap lock.
 */
static unsigned long find_aligned_range(unsigned long *bitmap,
					u32 start, u32 nbits,
					int len, int align)
{
	unsigned long i, end;

	for (;;) {
		/* Advance to the next aligned candidate that is itself free. */
		start = ALIGN(start, align);

		while ((start < nbits) && test_bit(start, bitmap))
			start += align;

		if (start >= nbits)
			return -1;

		end = start + len;
		if (end > nbits)
			return -1;

		/* Verify the rest of the candidate range is free. */
		for (i = start + 1; i < end; i++)
			if (test_bit(i, bitmap))
				break;

		if (i == end)
			return start;

		/* Collision at bit i: restart the search just past it. */
		start = i + 1;
	}
}
|
||||
|
||||
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
|
||||
{
|
||||
u32 obj, i;
|
||||
|
||||
if (likely(cnt == 1 && align == 1))
|
||||
return mlx4_bitmap_alloc(bitmap);
|
||||
|
||||
spin_lock(&bitmap->lock);
|
||||
|
||||
obj = find_aligned_range(bitmap->table, bitmap->last,
|
||||
bitmap->max, cnt, align);
|
||||
if (obj >= bitmap->max) {
|
||||
bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
|
||||
obj = find_aligned_range(bitmap->table, 0,
|
||||
bitmap->max,
|
||||
cnt, align);
|
||||
}
|
||||
|
||||
if (obj < bitmap->max) {
|
||||
for (i = 0; i < cnt; i++)
|
||||
set_bit(obj + i, bitmap->table);
|
||||
if (obj == bitmap->last) {
|
||||
bitmap->last = (obj + cnt);
|
||||
if (bitmap->last >= bitmap->max)
|
||||
bitmap->last = 0;
|
||||
}
|
||||
obj |= bitmap->top;
|
||||
} else
|
||||
obj = -1;
|
||||
|
||||
spin_unlock(&bitmap->lock);
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
/*
 * Release @cnt consecutive objects starting at @obj back to @bitmap.
 * The top bits mixed in by the allocator are masked off before clearing.
 */
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
	u32 i;

	obj &= bitmap->max - 1;

	spin_lock(&bitmap->lock);
	/*
	 * The loop's i == 0 iteration clears bit @obj itself; the extra
	 * standalone clear_bit(obj, ...) that preceded this loop was a
	 * leftover from the old single-object free path and is removed
	 * as redundant.
	 */
	for (i = 0; i < cnt; i++)
		clear_bit(obj + i, bitmap->table);
	bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
	spin_unlock(&bitmap->lock);
}
@ -288,6 +288,8 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
|
||||
|
||||
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
|
||||
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
|
||||
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
|
||||
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
|
||||
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
|
||||
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
|
||||
|
||||
|
@ -147,19 +147,42 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
|
||||
|
||||
int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
|
||||
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_qp_table *qp_table = &priv->qp_table;
|
||||
int qpn;
|
||||
|
||||
qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
|
||||
if (qpn == -1)
|
||||
return -ENOMEM;
|
||||
|
||||
*base = qpn;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
|
||||
|
||||
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_qp_table *qp_table = &priv->qp_table;
|
||||
if (base_qpn < dev->caps.sqp_start + 8)
|
||||
return;
|
||||
|
||||
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
|
||||
|
||||
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_qp_table *qp_table = &priv->qp_table;
|
||||
int err;
|
||||
|
||||
if (sqpn)
|
||||
qp->qpn = sqpn;
|
||||
else {
|
||||
qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
|
||||
if (qp->qpn == -1)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (!qpn)
|
||||
return -EINVAL;
|
||||
|
||||
qp->qpn = qpn;
|
||||
|
||||
err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
|
||||
if (err)
|
||||
@ -208,9 +231,6 @@ err_put_qp:
|
||||
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
|
||||
|
||||
err_out:
|
||||
if (!sqpn)
|
||||
mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
|
||||
@ -239,9 +259,6 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
|
||||
mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
|
||||
mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
|
||||
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
|
||||
|
||||
if (qp->qpn >= dev->caps.sqp_start + 8)
|
||||
mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_free);
|
||||
|
||||
|
@ -400,7 +400,10 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
|
||||
int collapsed);
|
||||
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
|
||||
|
||||
int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp);
|
||||
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
|
||||
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
|
||||
|
||||
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
|
||||
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
|
||||
|
||||
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
|
||||
|
Loading…
Reference in New Issue
Block a user