mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 04:18:39 +08:00
net/mlx5: HWS, added send engine and context handling
Added implementation of send engine and handling of HWS context. Reviewed-by: Itamar Gozlan <igozlan@nvidia.com> Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com> Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
This commit is contained in:
parent
d4a605e968
commit
2ca62599aa
@ -0,0 +1,260 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
|
||||
/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
|
||||
|
||||
#include "mlx5hws_internal.h"
|
||||
|
||||
/* Return true when the device supports dynamic (per-STC) reparse,
 * i.e. reparse can be requested only for specific actions rather
 * than unconditionally for every rule.
 */
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
{
	return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
}
|
||||
|
||||
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
|
||||
{
|
||||
/* Prefer to use dynamic reparse, reparse only specific actions */
|
||||
if (mlx5hws_context_cap_dynamic_reparse(ctx))
|
||||
return MLX5_IFC_RTC_REPARSE_NEVER;
|
||||
|
||||
/* Otherwise use less efficient static */
|
||||
return MLX5_IFC_RTC_REPARSE_ALWAYS;
|
||||
}
|
||||
|
||||
/* Initialize the context-level caches and STC pools.
 *
 * Sets up the pattern cache, the definer cache, and one STC pool per
 * flow-table type. On any failure, everything already initialized is
 * torn down via the goto-cleanup chain and the error code is returned.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int hws_context_pools_init(struct mlx5hws_context *ctx)
{
	struct mlx5hws_pool_attr pool_attr = {0};
	u8 max_log_sz;
	int ret;
	int i;

	ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
	if (ret)
		return ret;

	ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
	if (ret)
		goto uninit_pat_cache;

	/* Create an STC pool per FT type */
	pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
	pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
	/* Clamp the pool size to the device maximum, then round up to the
	 * device allocation granularity.
	 */
	max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);

	for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
		pool_attr.table_type = i;
		ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
		if (!ctx->stc_pool[i]) {
			mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
			ret = -ENOMEM;
			goto free_stc_pools;
		}
	}

	return 0;

free_stc_pools:
	/* Destroy only the pools that were successfully created */
	for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
		if (ctx->stc_pool[i])
			mlx5hws_pool_destroy(ctx->stc_pool[i]);

	mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
	mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
	return ret;
}
|
||||
|
||||
static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
|
||||
if (ctx->stc_pool[i])
|
||||
mlx5hws_pool_destroy(ctx->stc_pool[i]);
|
||||
}
|
||||
|
||||
mlx5hws_definer_uninit_cache(ctx->definer_cache);
|
||||
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
|
||||
}
|
||||
|
||||
static int hws_context_init_pd(struct mlx5hws_context *ctx)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
|
||||
if (ret) {
|
||||
mlx5hws_err(ctx, "Failed to allocate PD\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
|
||||
{
|
||||
if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
|
||||
mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Verify every device/FW capability that hardware steering requires.
 *
 * Each missing capability logs an error and leaves the context without
 * MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT; the flag is set only when all of
 * the checks pass. Callers test the flag to decide whether to bring up
 * the HWS resources at all.
 */
static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
{
	struct mlx5hws_cmd_query_caps *caps = ctx->caps;

	/* HWS not supported on device / FW */
	if (!caps->wqe_based_update) {
		mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
		return;
	}

	if (!caps->eswitch_manager) {
		mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
		return;
	}

	/* Current solution requires all rules to set reparse bit */
	if ((!caps->nic_ft.reparse ||
	     (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
		return;
	}

	/* FW/HW must support 8DW STE */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		mlx5hws_err(ctx, "Required HWS STE format not supported\n");
		return;
	}

	/* Adding rules by hash and by offset are requirements */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
		return;
	}

	/* Support for SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
		return;
	}

	ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
}
|
||||
|
||||
/* Bring up the HWS-specific resources of a context: PD, pools/caches
 * and send queues.
 *
 * If the device lacks HWS support this is a successful no-op (the
 * context stays usable without HWS). On failure, already-initialized
 * resources are released via the goto-cleanup chain.
 *
 * Return: 0 on success (including the unsupported case), negative
 * errno on failure.
 */
static int hws_context_init_hws(struct mlx5hws_context *ctx,
				struct mlx5hws_context_attr *attr)
{
	int ret;

	hws_context_check_hws_supp(ctx);

	if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = hws_context_init_pd(ctx);
	if (ret)
		return ret;

	ret = hws_context_pools_init(ctx);
	if (ret)
		goto uninit_pd;

	/* Record that the caller asked for backward-compatible (bwc) mode */
	if (attr->bwc)
		ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;

	ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	INIT_LIST_HEAD(&ctx->tbl_list);

	return 0;

pools_uninit:
	hws_context_pools_uninit(ctx);
uninit_pd:
	hws_context_uninit_pd(ctx);
	return ret;
}
|
||||
|
||||
/* Tear down the HWS resources in reverse order of
 * hws_context_init_hws(). No-op when HWS was never brought up.
 */
static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
{
	if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
		return;

	mlx5hws_send_queues_close(ctx);
	hws_context_pools_uninit(ctx);
	hws_context_uninit_pd(ctx);
}
|
||||
|
||||
/* Create an HWS context for @mdev.
 *
 * Allocates the context, queries device capabilities, initializes
 * vport info and the HWS resources (queues, pools, PD), and registers
 * the debugfs dump entry. The returned context is freed with
 * mlx5hws_context_close().
 *
 * Return: the new context, or NULL on any failure.
 */
struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
					     struct mlx5hws_context_attr *attr)
{
	struct mlx5hws_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->mdev = mdev;

	mutex_init(&ctx->ctrl_lock);
	xa_init(&ctx->peer_ctx_xa);

	ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
	if (!ctx->caps)
		goto free_ctx;

	ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
	if (ret)
		goto free_caps;

	ret = mlx5hws_vport_init_vports(ctx);
	if (ret)
		goto free_caps;

	ret = hws_context_init_hws(ctx, attr);
	if (ret)
		goto uninit_vports;

	mlx5hws_debug_init_dump(ctx);

	return ctx;

uninit_vports:
	mlx5hws_vport_uninit_vports(ctx);
free_caps:
	kfree(ctx->caps);
free_ctx:
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return NULL;
}
|
||||
|
||||
/* Destroy a context created by mlx5hws_context_open(), releasing all
 * its resources in reverse order of creation.
 *
 * Return: always 0.
 */
int mlx5hws_context_close(struct mlx5hws_context *ctx)
{
	mlx5hws_debug_uninit_dump(ctx);
	hws_context_uninit_hws(ctx);
	mlx5hws_vport_uninit_vports(ctx);
	kfree(ctx->caps);
	xa_destroy(&ctx->peer_ctx_xa);
	mutex_destroy(&ctx->ctrl_lock);
	kfree(ctx);
	return 0;
}
|
||||
|
||||
/* Register @peer_ctx under @peer_vhca_id in this context's peer map.
 *
 * A store failure is only logged — best effort; the peer xarray entry
 * is simply not updated in that case.
 */
void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
			      struct mlx5hws_context *peer_ctx,
			      u16 peer_vhca_id)
{
	mutex_lock(&ctx->ctrl_lock);

	if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
		pr_warn("HWS: failed storing peer vhca ID in peer xarray\n");

	mutex_unlock(&ctx->ctrl_lock);
}
|
@ -0,0 +1,64 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
|
||||
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
|
||||
|
||||
#ifndef MLX5HWS_CONTEXT_H_
|
||||
#define MLX5HWS_CONTEXT_H_
|
||||
|
||||
/* Context-wide state flags kept in mlx5hws_context::flags. */
enum mlx5hws_context_flags {
	MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0, /* device/FW caps allow HWS */
	MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1, /* PD allocated by this context */
	MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2, /* set from attr->bwc at open */
};
|
||||
|
||||
/* Indices into mlx5hws_context_common_res::shared_stc. */
enum mlx5hws_context_shared_stc_type {
	MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0,
	MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1,
	MLX5HWS_CONTEXT_SHARED_STC_MAX = 2,
};
|
||||
|
||||
/* Per-table-type steering resources shared across the context. */
struct mlx5hws_context_common_res {
	struct mlx5hws_action_default_stc *default_stc;
	struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX];
	struct mlx5hws_cmd_forward_tbl *default_miss;
};
|
||||
|
||||
/* debugfs directories used by the HWS debug dump facility. */
struct mlx5hws_context_debug_info {
	struct dentry *steering_debugfs;
	struct dentry *fdb_debugfs;
};
|
||||
|
||||
/* Cached GVMI identifiers for the eswitch vports.
 * NOTE(review): vport_gvmi_xa looks like a vport-number -> GVMI cache —
 * confirm against mlx5hws_vport_init_vports().
 */
struct mlx5hws_context_vports {
	u16 esw_manager_gvmi;
	u16 uplink_gvmi;
	struct xarray vport_gvmi_xa;
};
|
||||
|
||||
/* Main hardware-steering context; created by mlx5hws_context_open()
 * and destroyed by mlx5hws_context_close().
 */
struct mlx5hws_context {
	struct mlx5_core_dev *mdev;
	struct mlx5hws_cmd_query_caps *caps; /* queried device/FW capabilities */
	u32 pd_num; /* PD number; private when FLAG_PRIVATE_PD is set */
	struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX]; /* one STC pool per FT type */
	struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
	struct mlx5hws_pattern_cache *pattern_cache;
	struct mlx5hws_definer_cache *definer_cache;
	struct mutex ctrl_lock; /* control lock to protect the whole context */
	enum mlx5hws_context_flags flags;
	struct mlx5hws_send_engine *send_queue;
	size_t queues; /* number of send queues — see mlx5hws_send_queues_open() */
	struct mutex *bwc_send_queue_locks; /* protect BWC queues */
	struct list_head tbl_list;
	struct mlx5hws_context_debug_info debug_info;
	struct xarray peer_ctx_xa; /* peer contexts keyed by peer vhca ID */
	struct mlx5hws_context_vports vports;
};
|
||||
|
||||
static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
|
||||
{
|
||||
return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
|
||||
}
|
||||
|
||||
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
|
||||
|
||||
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
|
||||
|
||||
#endif /* MLX5HWS_CONTEXT_H_ */
|
1209
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
Normal file
1209
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,270 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
|
||||
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
|
||||
|
||||
#ifndef MLX5HWS_SEND_H_
|
||||
#define MLX5HWS_SEND_H_
|
||||
|
||||
/* A single operation requires at least two WQEBBs, so a budget of
 * 32 WQEs allows at most 16 such operations per rule.
 */
#define MAX_WQES_PER_RULE 32
|
||||
|
||||
/* WQE opcode used for steering table access. */
enum mlx5hws_wqe_opcode {
	MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
};

/* WQE opcode modifier: whether the WQE carries an STE or
 * modify-header argument data.
 */
enum mlx5hws_wqe_opmod {
	MLX5HWS_WQE_OPMOD_GTA_STE = 0,
	MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
};

/* GTA operation: activate (insert) or deactivate (remove) an entry. */
enum mlx5hws_wqe_gta_opcode {
	MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
	MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
};

/* GTA operation modifier, mirroring mlx5hws_wqe_opmod. */
enum mlx5hws_wqe_gta_opmod {
	MLX5HWS_WQE_GTA_OPMOD_STE = 0,
	MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
};

/* GTA WQE segment sizes. */
enum mlx5hws_wqe_gta_sz {
	MLX5HWS_WQE_SZ_GTA_CTRL = 48,
	MLX5HWS_WQE_SZ_GTA_DATA = 64,
};
|
||||
|
||||
/* WQE Control segment.
 * NOTE(review): big-endian fields mirror the device WQE layout — do not
 * reorder or resize.
 */
struct mlx5hws_wqe_ctrl_seg {
	__be32 opmod_idx_opcode;
	__be32 qpn_ds;
	__be32 flags;
	__be32 imm;
};

/* GTA WQE control segment: operation/direct-index word plus STC indices. */
struct mlx5hws_wqe_gta_ctrl_seg {
	__be32 op_dirix;
	__be32 stc_ix[5];
	__be32 rsvd0[6];
};

/* GTA WQE data segment carrying an STE (actions + match tag). */
struct mlx5hws_wqe_gta_data_seg_ste {
	__be32 rsvd0_ctr_id;
	__be32 rsvd1_definer;
	__be32 rsvd2[3];
	union {
		struct {
			__be32 action[3];
			__be32 tag[8];
		};
		/* jumbo tag overlays both the action and tag words */
		__be32 jumbo[11];
	};
};

/* GTA WQE data segment carrying modify-header action arguments. */
struct mlx5hws_wqe_gta_data_seg_arg {
	__be32 action_args[8];
};

/* Complete GTA WQE: control segment followed by one data segment. */
struct mlx5hws_wqe_gta {
	struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
	union {
		struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
		struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
	};
};
|
||||
|
||||
/* Completion queue side of a send ring. */
struct mlx5hws_send_ring_cq {
	struct mlx5_core_dev *mdev;
	struct mlx5_cqwq wq;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_cq mcq;
	u16 poll_wqe; /* compared against the SQ position to detect emptiness */
};

/* Per-posted-WQE bookkeeping consumed when its completion arrives. */
struct mlx5hws_send_ring_priv {
	struct mlx5hws_rule *rule;
	void *user_data;
	u32 num_wqebbs;
	u32 id;
	u32 retry_id;
	u32 *used_id;
};

/* A dependent WQE buffered (ctrl + data pre-built) until it can be
 * posted to the SQ.
 */
struct mlx5hws_send_ring_dep_wqe {
	struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
	struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
	struct mlx5hws_rule *rule;
	u32 rtc_0;
	u32 rtc_1;
	u32 retry_rtc_0;
	u32 retry_rtc_1;
	u32 direct_index;
	void *user_data;
};

/* Send queue side of a send ring. */
struct mlx5hws_send_ring_sq {
	struct mlx5_core_dev *mdev;
	u16 cur_post; /* producer position; masked by buf_mask when indexing */
	u16 buf_mask;
	struct mlx5hws_send_ring_priv *wr_priv;
	unsigned int last_idx;
	struct mlx5hws_send_ring_dep_wqe *dep_wqe;
	unsigned int head_dep_idx;
	unsigned int tail_dep_idx;
	u32 sqn;
	struct mlx5_wq_cyc wq;
	struct mlx5_wq_ctrl wq_ctrl;
	void __iomem *uar_map;
};

/* A send ring pairs an SQ with its CQ. */
struct mlx5hws_send_ring {
	struct mlx5hws_send_ring_cq send_cq;
	struct mlx5hws_send_ring_sq send_sq;
};

/* One entry in the locally generated completion ring. */
struct mlx5hws_completed_poll_entry {
	void *user_data;
	enum mlx5hws_flow_op_status status;
};

/* Ring of completions generated by the driver itself
 * (see mlx5hws_send_engine_gen_comp); pi/ci are producer/consumer
 * indices wrapped by mask.
 */
struct mlx5hws_completed_poll {
	struct mlx5hws_completed_poll_entry *entries;
	u16 ci;
	u16 pi;
	u16 mask;
};
|
||||
|
||||
/* A send engine (queue): its ring, UAR page and the ring of locally
 * generated completions. used_entries/num_entries track rule-slot
 * occupancy (see mlx5hws_send_engine_full()).
 */
struct mlx5hws_send_engine {
	struct mlx5hws_send_ring send_ring;
	struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */
	struct mlx5hws_completed_poll completed;
	u16 used_entries;
	u16 num_entries;
	bool err; /* engine entered an error state */
	struct mutex lock; /* Protects the send engine */
};

/* Handle returned by mlx5hws_send_engine_post_start() and threaded
 * through post_req_wqe()/post_end().
 */
struct mlx5hws_send_engine_post_ctrl {
	struct mlx5hws_send_engine *queue;
	struct mlx5hws_send_ring *send_ring;
	size_t num_wqebbs;
};
|
||||
|
||||
/* Attributes of a single WQE being posted through the send engine. */
struct mlx5hws_send_engine_post_attr {
	u8 opcode; /* enum mlx5hws_wqe_opcode */
	u8 opmod; /* enum mlx5hws_wqe_opmod */
	u8 notify_hw; /* ring the doorbell after posting */
	u8 fence;
	u8 match_definer_id;
	u8 range_definer_id;
	size_t len;
	struct mlx5hws_rule *rule;
	u32 id;
	u32 retry_id;
	u32 *used_id;
	void *user_data; /* returned to the caller with the completion */
};

/* Everything needed to send one STE: target RTCs (with retry
 * alternatives), GTA opcode, match tag(s) and pre-built WQE segments.
 */
struct mlx5hws_send_ste_attr {
	u32 rtc_0;
	u32 rtc_1;
	u32 retry_rtc_0;
	u32 retry_rtc_1;
	u32 *used_id_rtc_0;
	u32 *used_id_rtc_1;
	bool wqe_tag_is_jumbo;
	u8 gta_opcode; /* enum mlx5hws_wqe_gta_opcode */
	u32 direct_index;
	struct mlx5hws_send_engine_post_attr send_attr;
	struct mlx5hws_rule_match_tag *wqe_tag;
	struct mlx5hws_rule_match_tag *range_wqe_tag;
	struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
	struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
	struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
};
|
||||
|
||||
struct mlx5hws_send_ring_dep_wqe *
|
||||
mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);
|
||||
|
||||
void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
|
||||
|
||||
void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
|
||||
|
||||
void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
|
||||
|
||||
int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
|
||||
struct mlx5hws_send_engine *queue,
|
||||
u16 queue_size);
|
||||
|
||||
void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
|
||||
|
||||
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
|
||||
u16 queues,
|
||||
u16 queue_size);
|
||||
|
||||
int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
|
||||
u16 queue_id,
|
||||
u32 actions);
|
||||
|
||||
int mlx5hws_send_test(struct mlx5hws_context *ctx,
|
||||
u16 queues,
|
||||
u16 queue_size);
|
||||
|
||||
struct mlx5hws_send_engine_post_ctrl
|
||||
mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);
|
||||
|
||||
void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
|
||||
char **buf, size_t *len);
|
||||
|
||||
void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
|
||||
struct mlx5hws_send_engine_post_attr *attr);
|
||||
|
||||
void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
|
||||
struct mlx5hws_send_ste_attr *ste_attr);
|
||||
|
||||
void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
|
||||
struct mlx5hws_send_engine *queue,
|
||||
struct mlx5hws_send_ste_attr *ste_attr);
|
||||
|
||||
void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);
|
||||
|
||||
static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
|
||||
{
|
||||
struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
|
||||
struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;
|
||||
|
||||
return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
|
||||
}
|
||||
|
||||
static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
|
||||
{
|
||||
return queue->used_entries >= queue->num_entries;
|
||||
}
|
||||
|
||||
static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
|
||||
{
|
||||
queue->used_entries++;
|
||||
}
|
||||
|
||||
static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
|
||||
{
|
||||
queue->used_entries--;
|
||||
}
|
||||
|
||||
static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
|
||||
void *user_data,
|
||||
int comp_status)
|
||||
{
|
||||
struct mlx5hws_completed_poll *comp = &queue->completed;
|
||||
|
||||
comp->entries[comp->pi].status = comp_status;
|
||||
comp->entries[comp->pi].user_data = user_data;
|
||||
|
||||
comp->pi = (comp->pi + 1) & comp->mask;
|
||||
}
|
||||
|
||||
/* True when the send engine has entered an error state. */
static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
{
	return queue->err;
}
|
||||
|
||||
#endif /* MLX5HWS_SEND_H_ */
|
Loading…
Reference in New Issue
Block a user