net/mlx5: Move device memory management to mlx5_core
Move the device memory (SW ICM) allocation and deallocation commands to mlx5_core, exposing this API to all mlx5_core users. This comes as preparation for supporting SW steering in the kernel, where it will be required to allocate and register device memory for direct rule insertion. In addition, an API to register this device memory for future remote access operations is introduced using the create_mkey commands.

Signed-off-by: Ariel Levkovich <lariel@mellanox.com>
Reviewed-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit c9b9dcb430 (parent 00679b631e)
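For orientation, the sketch below shows how an in-kernel mlx5_core consumer (for example, the planned SW steering code) might call the relocated API once this patch is applied. The wrapper function, the uid value of 0 and the error handling are illustrative assumptions and are not part of this commit; only mlx5_dm_sw_icm_alloc(), mlx5_dm_sw_icm_dealloc(), MLX5_SW_ICM_TYPE_STEERING and MLX5_SW_ICM_BLOCK_SIZE() come from the patch itself.

#include <linux/mlx5/driver.h>

/* Hypothetical consumer: reserve one SW ICM block for direct rule insertion
 * and release it again. uid 0 is used here for a kernel (non-DEVX) owner.
 */
static int example_sw_icm_use(struct mlx5_core_dev *dev)
{
    /* length must be a power of two and block aligned; one block satisfies both */
    u64 len = MLX5_SW_ICM_BLOCK_SIZE(dev);
    phys_addr_t addr;
    u32 obj_id;
    int err;

    err = mlx5_dm_sw_icm_alloc(dev, MLX5_SW_ICM_TYPE_STEERING, len, 0,
                               &addr, &obj_id);
    if (err)
        return err; /* -EOPNOTSUPP when the device has no SW ICM */

    /* ... write steering entries into the ICM at 'addr' ... */

    return mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING, len, 0,
                                  addr, obj_id);
}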
@@ -186,136 +186,6 @@ int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
 	return err;
 }
 
-int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
-			  u16 uid, phys_addr_t *addr, u32 *obj_id)
-{
-	struct mlx5_core_dev *dev = dm->dev;
-	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
-	u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
-	unsigned long *block_map;
-	u64 icm_start_addr;
-	u32 log_icm_size;
-	u32 num_blocks;
-	u32 max_blocks;
-	u64 block_idx;
-	void *sw_icm;
-	int ret;
-
-	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
-		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
-	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
-
-	switch (type) {
-	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
-						    steering_sw_icm_start_address);
-		log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
-		block_map = dm->steering_sw_icm_alloc_blocks;
-		break;
-	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
-						    header_modify_sw_icm_start_address);
-		log_icm_size = MLX5_CAP_DEV_MEM(dev,
-						log_header_modify_sw_icm_size);
-		block_map = dm->header_modify_sw_icm_alloc_blocks;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
-		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
-	max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
-	spin_lock(&dm->lock);
-	block_idx = bitmap_find_next_zero_area(block_map,
-					       max_blocks,
-					       0,
-					       num_blocks, 0);
-
-	if (block_idx < max_blocks)
-		bitmap_set(block_map,
-			   block_idx, num_blocks);
-
-	spin_unlock(&dm->lock);
-
-	if (block_idx >= max_blocks)
-		return -ENOMEM;
-
-	sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
-	icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
-	MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
-		   icm_start_addr);
-	MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));
-
-	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (ret) {
-		spin_lock(&dm->lock);
-		bitmap_clear(block_map,
-			     block_idx, num_blocks);
-		spin_unlock(&dm->lock);
-
-		return ret;
-	}
-
-	*addr = icm_start_addr;
-	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-
-	return 0;
-}
-
-int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
-			    u16 uid, phys_addr_t addr, u32 obj_id)
-{
-	struct mlx5_core_dev *dev = dm->dev;
-	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
-	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
-	unsigned long *block_map;
-	u32 num_blocks;
-	u64 start_idx;
-	int err;
-
-	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
-		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
-
-	switch (type) {
-	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-		start_idx =
-			(addr - MLX5_CAP64_DEV_MEM(
-					dev, steering_sw_icm_start_address)) >>
-			MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
-		block_map = dm->steering_sw_icm_alloc_blocks;
-		break;
-	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-		start_idx =
-			(addr -
-			 MLX5_CAP64_DEV_MEM(
-				 dev, header_modify_sw_icm_start_address)) >>
-			MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
-		block_map = dm->header_modify_sw_icm_alloc_blocks;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
-		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
-	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
-	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
-	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-
-	spin_lock(&dm->lock);
-	bitmap_clear(block_map,
-		     start_idx, num_blocks);
-	spin_unlock(&dm->lock);
-
-	return 0;
-}
-
 int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
 {
 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
@@ -65,8 +65,4 @@ int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
 			     u16 uid);
 int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
 		     u16 opmod, u8 port);
-int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
-			  u16 uid, phys_addr_t *addr, u32 *obj_id);
-int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
-			    u16 uid, phys_addr_t addr, u32 obj_id);
 #endif /* MLX5_IB_CMD_H */
@@ -2280,6 +2280,7 @@ static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
 			return -EOPNOTSUPP;
 		break;
 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
 		if (!capable(CAP_SYS_RAWIO) ||
 		    !capable(CAP_NET_RAW))
 			return -EPERM;
@@ -2344,20 +2345,20 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
 				  struct uverbs_attr_bundle *attrs,
 				  int type)
 {
-	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+	struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
 	u64 act_size;
 	int err;
 
 	/* Allocation size must a multiple of the basic block size
 	 * and a power of 2.
 	 */
-	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
 	act_size = roundup_pow_of_two(act_size);
 
 	dm->size = act_size;
-	err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size,
-				    to_mucontext(ctx)->devx_uid, &dm->dev_addr,
-				    &dm->icm_dm.obj_id);
+	err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
+				   to_mucontext(ctx)->devx_uid, &dm->dev_addr,
+				   &dm->icm_dm.obj_id);
 	if (err)
 		return err;
@@ -2365,9 +2366,9 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
 				     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
 				     &dm->dev_addr, sizeof(dm->dev_addr));
 	if (err)
-		mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size,
-					to_mucontext(ctx)->devx_uid,
-					dm->dev_addr, dm->icm_dm.obj_id);
+		mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
+				       to_mucontext(ctx)->devx_uid, dm->dev_addr,
+				       dm->icm_dm.obj_id);
 
 	return err;
 }
@@ -2407,8 +2408,14 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
 					    attrs);
 		break;
 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+		err = handle_alloc_dm_sw_icm(context, dm,
+					     attr, attrs,
+					     MLX5_SW_ICM_TYPE_STEERING);
+		break;
 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-		err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type);
+		err = handle_alloc_dm_sw_icm(context, dm,
+					     attr, attrs,
+					     MLX5_SW_ICM_TYPE_HEADER_MODIFY);
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -2428,6 +2435,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
 {
 	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
 		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+	struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
 	struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
 	struct mlx5_ib_dm *dm = to_mdm(ibdm);
 	u32 page_idx;
@@ -2439,19 +2447,23 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
 		if (ret)
 			return ret;
 
-		page_idx = (dm->dev_addr -
-			    pci_resource_start(dm_db->dev->pdev, 0) -
-			    MLX5_CAP64_DEV_MEM(dm_db->dev,
-					       memic_bar_start_addr)) >>
-			   PAGE_SHIFT;
+		page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
+			    MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
+			   PAGE_SHIFT;
 		bitmap_clear(ctx->dm_pages, page_idx,
 			     DIV_ROUND_UP(dm->size, PAGE_SIZE));
 		break;
 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
+					     dm->size, ctx->devx_uid, dm->dev_addr,
+					     dm->icm_dm.obj_id);
+		if (ret)
+			return ret;
+		break;
 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-		ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size,
-					      ctx->devx_uid, dm->dev_addr,
-					      dm->icm_dm.obj_id);
+		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+					     dm->size, ctx->devx_uid, dm->dev_addr,
+					     dm->icm_dm.obj_id);
 		if (ret)
 			return ret;
 		break;
@@ -6097,8 +6109,6 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
 
 static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_core_dev *mdev = dev->mdev;
-
 	mlx5_ib_cleanup_multiport_master(dev);
 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
 		srcu_barrier(&dev->mr_srcu);
@@ -6106,29 +6116,11 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 	}
 
 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
-
-	WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
-		!bitmap_empty(
-			dev->dm.steering_sw_icm_alloc_blocks,
-			BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
-			    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
-
-	kfree(dev->dm.steering_sw_icm_alloc_blocks);
-
-	WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
-		!bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
-			      BIT(MLX5_CAP_DEV_MEM(
-					  mdev, log_header_modify_sw_icm_size) -
-				  MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
-
-	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
 }
 
 static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
-	u64 header_modify_icm_blocks = 0;
-	u64 steering_icm_blocks = 0;
 	int err;
 	int i;
@@ -6173,51 +6165,17 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	INIT_LIST_HEAD(&dev->qp_list);
 	spin_lock_init(&dev->reset_flow_resource_lock);
 
-	if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
-	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
-		if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
-			steering_icm_blocks =
-				BIT(MLX5_CAP_DEV_MEM(mdev,
-						     log_steering_sw_icm_size) -
-				    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
-
-			dev->dm.steering_sw_icm_alloc_blocks =
-				kcalloc(BITS_TO_LONGS(steering_icm_blocks),
-					sizeof(unsigned long), GFP_KERNEL);
-			if (!dev->dm.steering_sw_icm_alloc_blocks)
-				goto err_mp;
-		}
-
-		if (MLX5_CAP64_DEV_MEM(mdev,
-				       header_modify_sw_icm_start_address)) {
-			header_modify_icm_blocks = BIT(
-				MLX5_CAP_DEV_MEM(
-					mdev, log_header_modify_sw_icm_size) -
-				MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
-
-			dev->dm.header_modify_sw_icm_alloc_blocks =
-				kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
-					sizeof(unsigned long), GFP_KERNEL);
-			if (!dev->dm.header_modify_sw_icm_alloc_blocks)
-				goto err_dm;
-		}
-	}
-
 	spin_lock_init(&dev->dm.lock);
 	dev->dm.dev = mdev;
 
 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
 		err = init_srcu_struct(&dev->mr_srcu);
 		if (err)
-			goto err_dm;
+			goto err_mp;
 	}
 
 	return 0;
 
-err_dm:
-	kfree(dev->dm.steering_sw_icm_alloc_blocks);
-	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
-
 err_mp:
 	mlx5_ib_cleanup_multiport_master(dev);
 
@@ -880,8 +880,6 @@ struct mlx5_dm {
 	 */
 	spinlock_t lock;
 	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
-	unsigned long *steering_sw_icm_alloc_blocks;
-	unsigned long *header_modify_sw_icm_alloc_blocks;
 };
 
 struct mlx5_read_counters_attr {
@@ -15,7 +15,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
 		transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
 		fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
-		lib/devcom.o lib/pci_vsc.o diag/fs_tracepoint.o \
+		lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
 		diag/fw_tracer.o diag/crdump.o devlink.o
 
 #
drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c (new file, 223 lines)
@@ -0,0 +1,223 @@

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

#include "mlx5_core.h"
#include "lib/mlx5.h"

struct mlx5_dm {
    /* protect access to icm bitmask */
    spinlock_t lock;
    unsigned long *steering_sw_icm_alloc_blocks;
    unsigned long *header_modify_sw_icm_alloc_blocks;
};

struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
{
    u64 header_modify_icm_blocks = 0;
    u64 steering_icm_blocks = 0;
    struct mlx5_dm *dm;

    if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
        return 0;

    dm = kzalloc(sizeof(*dm), GFP_KERNEL);
    if (!dm)
        return ERR_PTR(-ENOMEM);

    spin_lock_init(&dm->lock);

    if (MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address)) {
        steering_icm_blocks =
            BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
                MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));

        dm->steering_sw_icm_alloc_blocks =
            kcalloc(BITS_TO_LONGS(steering_icm_blocks),
                    sizeof(unsigned long), GFP_KERNEL);
        if (!dm->steering_sw_icm_alloc_blocks)
            goto err_steering;
    }

    if (MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address)) {
        header_modify_icm_blocks =
            BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_sw_icm_size) -
                MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));

        dm->header_modify_sw_icm_alloc_blocks =
            kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
                    sizeof(unsigned long), GFP_KERNEL);
        if (!dm->header_modify_sw_icm_alloc_blocks)
            goto err_modify_hdr;
    }

    return dm;

err_modify_hdr:
    kfree(dm->steering_sw_icm_alloc_blocks);

err_steering:
    kfree(dm);

    return ERR_PTR(-ENOMEM);
}

void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
{
    struct mlx5_dm *dm = dev->dm;

    if (!dev->dm)
        return;

    if (dm->steering_sw_icm_alloc_blocks) {
        WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks,
                              BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
                                  MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
        kfree(dm->steering_sw_icm_alloc_blocks);
    }

    if (dm->header_modify_sw_icm_alloc_blocks) {
        WARN_ON(!bitmap_empty(dm->header_modify_sw_icm_alloc_blocks,
                              BIT(MLX5_CAP_DEV_MEM(dev,
                                                   log_header_modify_sw_icm_size) -
                                  MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
        kfree(dm->header_modify_sw_icm_alloc_blocks);
    }

    kfree(dm);
}

int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
                         u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id)
{
    u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
    u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
    u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
    struct mlx5_dm *dm = dev->dm;
    unsigned long *block_map;
    u64 icm_start_addr;
    u32 log_icm_size;
    u32 max_blocks;
    u64 block_idx;
    void *sw_icm;
    int ret;

    if (!dev->dm)
        return -EOPNOTSUPP;

    if (!length || (length & (length - 1)) ||
        length & (MLX5_SW_ICM_BLOCK_SIZE(dev) - 1))
        return -EINVAL;

    MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
             MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
    MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
    MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

    switch (type) {
    case MLX5_SW_ICM_TYPE_STEERING:
        icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address);
        log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
        block_map = dm->steering_sw_icm_alloc_blocks;
        break;
    case MLX5_SW_ICM_TYPE_HEADER_MODIFY:
        icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
        log_icm_size = MLX5_CAP_DEV_MEM(dev,
                                        log_header_modify_sw_icm_size);
        block_map = dm->header_modify_sw_icm_alloc_blocks;
        break;
    default:
        return -EINVAL;
    }

    if (!block_map)
        return -EOPNOTSUPP;

    max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
    spin_lock(&dm->lock);
    block_idx = bitmap_find_next_zero_area(block_map,
                                           max_blocks,
                                           0,
                                           num_blocks, 0);

    if (block_idx < max_blocks)
        bitmap_set(block_map,
                   block_idx, num_blocks);

    spin_unlock(&dm->lock);

    if (block_idx >= max_blocks)
        return -ENOMEM;

    sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
    icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
    MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
               icm_start_addr);
    MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));

    ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    if (ret) {
        spin_lock(&dm->lock);
        bitmap_clear(block_map,
                     block_idx, num_blocks);
        spin_unlock(&dm->lock);

        return ret;
    }

    *addr = icm_start_addr;
    *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

    return 0;
}
EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_alloc);

int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
                           u64 length, u16 uid, phys_addr_t addr, u32 obj_id)
{
    u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
    u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
    u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
    struct mlx5_dm *dm = dev->dm;
    unsigned long *block_map;
    u64 icm_start_addr;
    u64 start_idx;
    int err;

    if (!dev->dm)
        return -EOPNOTSUPP;

    switch (type) {
    case MLX5_SW_ICM_TYPE_STEERING:
        icm_start_addr = MLX5_CAP64_DEV_MEM(dev, steering_sw_icm_start_address);
        block_map = dm->steering_sw_icm_alloc_blocks;
        break;
    case MLX5_SW_ICM_TYPE_HEADER_MODIFY:
        icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
        block_map = dm->header_modify_sw_icm_alloc_blocks;
        break;
    default:
        return -EINVAL;
    }

    MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
             MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
    MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
    MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
    MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

    err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    if (err)
        return err;

    start_idx = (addr - icm_start_addr) >> MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
    spin_lock(&dm->lock);
    bitmap_clear(block_map,
                 start_idx, num_blocks);
    spin_unlock(&dm->lock);

    return 0;
}
EXPORT_SYMBOL_GPL(mlx5_dm_sw_icm_dealloc);
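A short worked example of the block arithmetic used by the allocator above; the concrete numbers are assumed purely for illustration and are not read from any device:

/* Assumed example values (illustrative only):
 *   MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) = 12  ->  block size = 4 KiB
 *   icm_start_addr = 0x1000000, length = 8 KiB  ->  num_blocks = 2
 *   first free run in the bitmap found at block_idx = 3
 *   returned addr = 0x1000000 + (3 << 12) = 0x1003000
 *   dealloc recovers the index: start_idx = (0x1003000 - 0x1000000) >> 12 = 3
 */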
@@ -879,6 +879,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 		goto err_eswitch_cleanup;
 	}
 
+	dev->dm = mlx5_dm_create(dev);
+	if (IS_ERR(dev->dm))
+		mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
+
 	dev->tracer = mlx5_fw_tracer_create(dev);
 
 	return 0;
@@ -912,6 +916,7 @@ err_devcom:
 static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 {
 	mlx5_fw_tracer_destroy(dev->tracer);
+	mlx5_dm_cleanup(dev);
 	mlx5_fpga_cleanup(dev);
 	mlx5_eswitch_cleanup(dev->priv.eswitch);
 	mlx5_sriov_cleanup(dev);
@@ -198,6 +198,9 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
 
+struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
+void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+
 #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
 			    MLX5_CAP_GEN((mdev), pps_modify) && \
 			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
@@ -622,6 +622,11 @@ struct mlx5e_resources {
 	struct mlx5_sq_bfreg bfreg;
 };
 
+enum mlx5_sw_icm_type {
+	MLX5_SW_ICM_TYPE_STEERING,
+	MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+};
+
 #define MLX5_MAX_RESERVED_GIDS 8
 
 struct mlx5_rsvd_gids {
@@ -653,10 +658,14 @@ struct mlx5_clock {
 	struct mlx5_pps pps_info;
 };
 
+struct mlx5_dm;
 struct mlx5_fw_tracer;
 struct mlx5_vxlan;
 struct mlx5_geneve;
 
+#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+
 struct mlx5_core_dev {
 	struct device *device;
 	enum mlx5_coredev_type coredev_type;
@@ -690,6 +699,7 @@ struct mlx5_core_dev {
 	atomic_t num_qps;
 	u32 issi;
 	struct mlx5e_resources mlx5e_res;
+	struct mlx5_dm *dm;
 	struct mlx5_vxlan *vxlan;
 	struct mlx5_geneve *geneve;
 	struct {
@@ -1072,6 +1082,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 				 size_t *offsets);
 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
+int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+			 u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id);
+int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
 
 #ifdef CONFIG_MLX5_CORE_IPOIB
 struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,