Merge tag 'mlx5-updates-2021-03-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-03-24

mlx5e netdev driver updates:

1) Some cleanups from Colin, Tariq and Saeed.

2) Aya made some trivial refactoring to clean up and generalize PTP and RQ
(Receive Queue) creation and management: mostly code decoupling and reducing
dependencies between the different RX objects in the netdev driver. This is a
preparation series for the upcoming PTP special RQ creation, which will allow
coexistence of CQE compression (an important performance feature, especially
in Multihost systems) and HW TS PTP.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 026412ecac
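A minimal sketch of the decoupling pattern this series applies (illustrative only; the types below are simplified stand-ins, not the real mlx5 structures): the queue-parameter builders stop taking the netdev-private struct mlx5e_priv and instead take only the struct mlx5_core_dev plus the few values they actually need (for example an explicit q_counter), so PTP, trap and XSK code can reuse them without a full channel/netdev context.

/* Simplified stand-in types -- not the real mlx5 structures. */
#include <stdio.h>

struct mlx5_core_dev { int numa_node; };                       /* low-level device */
struct mlx5e_priv    { struct mlx5_core_dev *mdev; int q_counter; };
struct mlx5e_rq_param { int numa_node; int counter_set_id; };

/* Before: the builder needs the whole netdev-private context. */
static void build_rq_param_old(struct mlx5e_priv *priv, struct mlx5e_rq_param *p)
{
	p->numa_node = priv->mdev->numa_node;
	p->counter_set_id = priv->q_counter;
}

/* After: the builder depends only on the core device; callers pass what they have. */
static void build_rq_param_new(struct mlx5_core_dev *mdev, int q_counter,
			       struct mlx5e_rq_param *p)
{
	p->numa_node = mdev->numa_node;
	p->counter_set_id = q_counter;
}

int main(void)
{
	struct mlx5_core_dev mdev = { .numa_node = 0 };
	struct mlx5e_priv priv = { .mdev = &mdev, .q_counter = 7 };
	struct mlx5e_rq_param p;

	build_rq_param_old(&priv, &p);                  /* old calling convention */
	build_rq_param_new(&mdev, priv.q_counter, &p);  /* new calling convention */
	printf("counter_set_id=%d\n", p.counter_set_id);
	return 0;
}

The diff below applies the same pattern to mlx5e_build_rq_param(), mlx5e_build_sq_param(), mlx5e_build_tx_cq_param() and related helpers.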
@@ -707,11 +707,11 @@ struct mlx5e_channel {
int cpu;
};

struct mlx5e_port_ptp;
struct mlx5e_ptp;

struct mlx5e_channels {
struct mlx5e_channel **c;
struct mlx5e_port_ptp *port_ptp;
struct mlx5e_ptp *ptp;
unsigned int num;
struct mlx5e_params params;
};
@@ -726,7 +726,7 @@ struct mlx5e_channel_stats {
struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;

struct mlx5e_port_ptp_stats {
struct mlx5e_ptp_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
@@ -855,10 +855,10 @@ struct mlx5e_priv {
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_channel_stats trap_stats;
struct mlx5e_port_ptp_stats port_ptp_stats;
struct mlx5e_ptp_stats ptp_stats;
u16 max_nch;
u8 max_opened_tc;
bool port_ptp_opened;
bool tx_ptp_opened;
struct hwtstamp_config tstamp;
u16 q_counter;
u16 drop_rq_q_counter;
@@ -919,8 +919,6 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);

void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -963,9 +961,9 @@ struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types
struct mlx5e_xsk_param;

struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5e_xsk_param *xsk, int node,
struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
@@ -1024,14 +1022,6 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
@@ -1090,10 +1080,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
@@ -1177,8 +1167,6 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
u16 num_channels);
void mlx5e_rx_dim_work(struct work_struct *work);
@ -3,10 +3,12 @@
|
||||
|
||||
#include "en/params.h"
|
||||
#include "en/txrx.h"
|
||||
#include "en_accel/tls_rxtx.h"
|
||||
#include "en/port.h"
|
||||
#include "en_accel/en_accel.h"
|
||||
#include "accel/ipsec.h"
|
||||
|
||||
static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
|
||||
struct mlx5e_xsk_param *xsk)
|
||||
static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
|
||||
struct mlx5e_xsk_param *xsk)
|
||||
{
|
||||
return params->xdp_prog || xsk;
|
||||
}
|
||||
@ -37,8 +39,8 @@ u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
|
||||
return linear_rq_headroom + hw_mtu;
|
||||
}
|
||||
|
||||
u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
|
||||
struct mlx5e_xsk_param *xsk)
|
||||
static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
|
||||
struct mlx5e_xsk_param *xsk)
|
||||
{
|
||||
u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);
|
||||
|
||||
@ -172,17 +174,485 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
|
||||
return stop_room;
|
||||
}
|
||||
|
||||
int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params)
|
||||
int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
|
||||
{
|
||||
size_t sq_size = 1 << params->log_sq_size;
|
||||
u16 stop_room;
|
||||
|
||||
stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
|
||||
stop_room = mlx5e_calc_sq_stop_room(mdev, params);
|
||||
if (stop_room >= sq_size) {
|
||||
netdev_err(priv->netdev, "Stop room %u is bigger than the SQ size %zu\n",
|
||||
stop_room, sq_size);
|
||||
mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
|
||||
stop_room, sq_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
|
||||
{
|
||||
struct dim_cq_moder moder;
|
||||
|
||||
moder.cq_period_mode = cq_period_mode;
|
||||
moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
|
||||
moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
|
||||
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
|
||||
moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
|
||||
|
||||
return moder;
|
||||
}
|
||||
|
||||
static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
|
||||
{
|
||||
struct dim_cq_moder moder;
|
||||
|
||||
moder.cq_period_mode = cq_period_mode;
|
||||
moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
|
||||
moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
|
||||
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
|
||||
moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
|
||||
|
||||
return moder;
|
||||
}
|
||||
|
||||
static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
|
||||
{
|
||||
return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
|
||||
DIM_CQ_PERIOD_MODE_START_FROM_CQE :
|
||||
DIM_CQ_PERIOD_MODE_START_FROM_EQE;
|
||||
}
|
||||
|
||||
void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
|
||||
{
|
||||
if (params->tx_dim_enabled) {
|
||||
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
|
||||
|
||||
params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
|
||||
} else {
|
||||
params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
|
||||
}
|
||||
}
|
||||
|
||||
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
|
||||
{
|
||||
if (params->rx_dim_enabled) {
|
||||
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
|
||||
|
||||
params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
|
||||
} else {
|
||||
params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
|
||||
}
|
||||
}
|
||||
|
||||
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
|
||||
{
|
||||
mlx5e_reset_tx_moderation(params, cq_period_mode);
|
||||
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
|
||||
params->tx_cq_moderation.cq_period_mode ==
|
||||
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
|
||||
}
|
||||
|
||||
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
|
||||
{
|
||||
mlx5e_reset_rx_moderation(params, cq_period_mode);
|
||||
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
|
||||
params->rx_cq_moderation.cq_period_mode ==
|
||||
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
|
||||
}
|
||||
|
||||
bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
|
||||
{
|
||||
u32 link_speed = 0;
|
||||
u32 pci_bw = 0;
|
||||
|
||||
mlx5e_port_max_linkspeed(mdev, &link_speed);
|
||||
pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
|
||||
mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
|
||||
link_speed, pci_bw);
|
||||
|
||||
#define MLX5E_SLOW_PCI_RATIO (2)
|
||||
|
||||
return link_speed && pci_bw &&
|
||||
link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
|
||||
}
|
||||
|
||||
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params)
|
||||
{
|
||||
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
|
||||
return false;
|
||||
|
||||
if (MLX5_IPSEC_DEV(mdev))
|
||||
return false;
|
||||
|
||||
if (params->xdp_prog) {
|
||||
/* XSK params are not considered here. If striding RQ is in use,
|
||||
* and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
|
||||
* be called with the known XSK params.
|
||||
*/
|
||||
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params)
|
||||
{
|
||||
params->log_rq_mtu_frames = is_kdump_kernel() ?
|
||||
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
|
||||
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
|
||||
|
||||
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
|
||||
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
|
||||
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
|
||||
BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
|
||||
BIT(params->log_rq_mtu_frames),
|
||||
BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
|
||||
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
|
||||
}
|
||||
|
||||
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
|
||||
{
|
||||
params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
|
||||
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
|
||||
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
|
||||
MLX5_WQ_TYPE_CYCLIC;
|
||||
}
|
||||
|
||||
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params)
|
||||
{
|
||||
/* Prefer Striding RQ, unless any of the following holds:
|
||||
* - Striding RQ configuration is not possible/supported.
|
||||
* - Slow PCI heuristic.
|
||||
* - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
|
||||
*
|
||||
* No XSK params: checking the availability of striding RQ in general.
|
||||
*/
|
||||
if (!slow_pci_heuristic(mdev) &&
|
||||
mlx5e_striding_rq_possible(mdev, params) &&
|
||||
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
|
||||
!mlx5e_rx_is_linear_skb(params, NULL)))
|
||||
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
|
||||
mlx5e_set_rq_type(mdev, params);
|
||||
mlx5e_init_rq_type_params(mdev, params);
|
||||
}
|
||||
|
||||
/* Build queue parameters */
|
||||
|
||||
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
|
||||
{
|
||||
*ccp = (struct mlx5e_create_cq_param) {
|
||||
.napi = &c->napi,
|
||||
.ch_stats = c->stats,
|
||||
.node = cpu_to_node(c->cpu),
|
||||
.ix = c->ix,
|
||||
};
|
||||
}
|
||||
|
||||
#define DEFAULT_FRAG_SIZE (2048)
|
||||
|
||||
static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_xsk_param *xsk,
|
||||
struct mlx5e_rq_frags_info *info)
|
||||
{
|
||||
u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
|
||||
int frag_size_max = DEFAULT_FRAG_SIZE;
|
||||
u32 buf_size = 0;
|
||||
int i;
|
||||
|
||||
if (MLX5_IPSEC_DEV(mdev))
|
||||
byte_count += MLX5E_METADATA_ETHER_LEN;
|
||||
|
||||
if (mlx5e_rx_is_linear_skb(params, xsk)) {
|
||||
int frag_stride;
|
||||
|
||||
frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
|
||||
frag_stride = roundup_pow_of_two(frag_stride);
|
||||
|
||||
info->arr[0].frag_size = byte_count;
|
||||
info->arr[0].frag_stride = frag_stride;
|
||||
info->num_frags = 1;
|
||||
info->wqe_bulk = PAGE_SIZE / frag_stride;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (byte_count > PAGE_SIZE +
|
||||
(MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
|
||||
frag_size_max = PAGE_SIZE;
|
||||
|
||||
i = 0;
|
||||
while (buf_size < byte_count) {
|
||||
int frag_size = byte_count - buf_size;
|
||||
|
||||
if (i < MLX5E_MAX_RX_FRAGS - 1)
|
||||
frag_size = min(frag_size, frag_size_max);
|
||||
|
||||
info->arr[i].frag_size = frag_size;
|
||||
info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
|
||||
|
||||
buf_size += frag_size;
|
||||
i++;
|
||||
}
|
||||
info->num_frags = i;
|
||||
/* number of different wqes sharing a page */
|
||||
info->wqe_bulk = 1 + (info->num_frags % 2);
|
||||
|
||||
out:
|
||||
info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
|
||||
info->log_num_frags = order_base_2(info->num_frags);
|
||||
}
|
||||
|
||||
static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
|
||||
{
|
||||
int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
|
||||
|
||||
switch (wq_type) {
|
||||
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
|
||||
sz += sizeof(struct mlx5e_rx_wqe_ll);
|
||||
break;
|
||||
default: /* MLX5_WQ_TYPE_CYCLIC */
|
||||
sz += sizeof(struct mlx5e_rx_wqe_cyc);
|
||||
}
|
||||
|
||||
return order_base_2(sz);
|
||||
}
|
||||
|
||||
static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_cq_param *param)
|
||||
{
|
||||
void *cqc = param->cqc;
|
||||
|
||||
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
|
||||
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
|
||||
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
|
||||
}
|
||||
|
||||
static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_xsk_param *xsk,
|
||||
struct mlx5e_cq_param *param)
|
||||
{
|
||||
bool hw_stridx = false;
|
||||
void *cqc = param->cqc;
|
||||
u8 log_cq_size;
|
||||
|
||||
switch (params->rq_wq_type) {
|
||||
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
|
||||
log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
|
||||
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
|
||||
hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
|
||||
break;
|
||||
default: /* MLX5_WQ_TYPE_CYCLIC */
|
||||
log_cq_size = params->log_rq_mtu_frames;
|
||||
}
|
||||
|
||||
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
|
||||
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
|
||||
MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
|
||||
MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
|
||||
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
|
||||
}
|
||||
|
||||
mlx5e_build_common_cq_param(mdev, param);
|
||||
param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
|
||||
}
|
||||
|
||||
void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_xsk_param *xsk,
|
||||
u16 q_counter,
|
||||
struct mlx5e_rq_param *param)
|
||||
{
|
||||
void *rqc = param->rqc;
|
||||
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
|
||||
int ndsegs = 1;
|
||||
|
||||
switch (params->rq_wq_type) {
|
||||
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
|
||||
MLX5_SET(wq, wq, log_wqe_num_of_strides,
|
||||
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
|
||||
MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
|
||||
MLX5_SET(wq, wq, log_wqe_stride_size,
|
||||
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
|
||||
MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
|
||||
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
|
||||
break;
|
||||
default: /* MLX5_WQ_TYPE_CYCLIC */
|
||||
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
|
||||
mlx5e_build_rq_frags_info(mdev, params, xsk, ¶m->frags_info);
|
||||
ndsegs = param->frags_info.num_frags;
|
||||
}
|
||||
|
||||
MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
|
||||
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
|
||||
MLX5_SET(wq, wq, log_wq_stride,
|
||||
mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
|
||||
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
|
||||
MLX5_SET(rqc, rqc, counter_set_id, q_counter);
|
||||
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
|
||||
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
|
||||
|
||||
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
|
||||
mlx5e_build_rx_cq_param(mdev, params, xsk, ¶m->cqp);
|
||||
}
|
||||
|
||||
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
|
||||
u16 q_counter,
|
||||
struct mlx5e_rq_param *param)
|
||||
{
|
||||
void *rqc = param->rqc;
|
||||
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
|
||||
|
||||
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
|
||||
MLX5_SET(wq, wq, log_wq_stride,
|
||||
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
|
||||
MLX5_SET(rqc, rqc, counter_set_id, q_counter);
|
||||
|
||||
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
|
||||
}
|
||||
|
||||
void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_cq_param *param)
|
||||
{
|
||||
void *cqc = param->cqc;
|
||||
|
||||
MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
|
||||
|
||||
mlx5e_build_common_cq_param(mdev, param);
|
||||
param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
|
||||
}
|
||||
|
||||
void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_sq_param *param)
|
||||
{
|
||||
void *sqc = param->sqc;
|
||||
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
|
||||
|
||||
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
|
||||
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
|
||||
|
||||
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
|
||||
}
|
||||
|
||||
void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_sq_param *param)
|
||||
{
|
||||
void *sqc = param->sqc;
|
||||
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
|
||||
bool allow_swp;
|
||||
|
||||
allow_swp = mlx5_geneve_tx_allowed(mdev) ||
|
||||
!!MLX5_IPSEC_DEV(mdev);
|
||||
mlx5e_build_sq_param_common(mdev, param);
|
||||
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
|
||||
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
|
||||
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
|
||||
param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
|
||||
mlx5e_build_tx_cq_param(mdev, params, ¶m->cqp);
|
||||
}
|
||||
|
||||
static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
|
||||
u8 log_wq_size,
|
||||
struct mlx5e_cq_param *param)
|
||||
{
|
||||
void *cqc = param->cqc;
|
||||
|
||||
MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
|
||||
|
||||
mlx5e_build_common_cq_param(mdev, param);
|
||||
|
||||
param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
|
||||
}
|
||||
|
||||
static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
|
||||
{
|
||||
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
|
||||
|
||||
return MLX5_GET(wq, wq, log_wq_sz);
|
||||
}
|
||||
|
||||
static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
|
||||
struct mlx5e_rq_param *rqp)
|
||||
{
|
||||
switch (params->rq_wq_type) {
|
||||
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
|
||||
return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
|
||||
order_base_2(MLX5E_UMR_WQEBBS) +
|
||||
mlx5e_get_rq_log_wq_sz(rqp->rqc));
|
||||
default: /* MLX5_WQ_TYPE_CYCLIC */
|
||||
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
|
||||
{
|
||||
if (mlx5_accel_is_ktls_rx(mdev))
|
||||
return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
|
||||
|
||||
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
|
||||
}
|
||||
|
||||
static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
|
||||
u8 log_wq_size,
|
||||
struct mlx5e_sq_param *param)
|
||||
{
|
||||
void *sqc = param->sqc;
|
||||
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
|
||||
|
||||
mlx5e_build_sq_param_common(mdev, param);
|
||||
|
||||
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
|
||||
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
|
||||
mlx5e_build_ico_cq_param(mdev, log_wq_size, ¶m->cqp);
|
||||
}
|
||||
|
||||
static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
|
||||
u8 log_wq_size,
|
||||
struct mlx5e_sq_param *param)
|
||||
{
|
||||
void *sqc = param->sqc;
|
||||
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
|
||||
|
||||
mlx5e_build_sq_param_common(mdev, param);
|
||||
param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
|
||||
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
|
||||
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
|
||||
mlx5e_build_ico_cq_param(mdev, log_wq_size, ¶m->cqp);
|
||||
}
|
||||
|
||||
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_sq_param *param)
|
||||
{
|
||||
void *sqc = param->sqc;
|
||||
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
|
||||
|
||||
mlx5e_build_sq_param_common(mdev, param);
|
||||
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
|
||||
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
|
||||
mlx5e_build_tx_cq_param(mdev, params, ¶m->cqp);
|
||||
}
|
||||
|
||||
void mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params,
|
||||
u16 q_counter,
|
||||
struct mlx5e_channel_param *cparam)
|
||||
{
|
||||
u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
|
||||
|
||||
mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
|
||||
|
||||
icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
|
||||
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
|
||||
|
||||
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
|
||||
mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
|
||||
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
|
||||
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
|
||||
}
|
||||
|
@@ -84,12 +84,21 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,

/* Parameter calculations */

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);

bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
@@ -112,32 +121,31 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
void mlx5e_build_rq_param(struct mlx5e_priv *priv,
void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_sq_param *param);
void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_cq_param *param);
void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_cq_param *param);
void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
u8 log_wq_size,
struct mlx5e_cq_param *param);
void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
u8 log_wq_size,
struct mlx5e_sq_param *param);
void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_sq_param *param);
void mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 q_counter,
struct mlx5e_channel_param *cparam);

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params);
int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);

#endif /* __MLX5_EN_PARAMS_H__ */
@ -3,6 +3,14 @@
|
||||
|
||||
#include "en/ptp.h"
|
||||
#include "en/txrx.h"
|
||||
#include "en/params.h"
|
||||
|
||||
#define MLX5E_PTP_CHANNEL_IX 0
|
||||
|
||||
struct mlx5e_ptp_params {
|
||||
struct mlx5e_params params;
|
||||
struct mlx5e_sq_param txq_sq_param;
|
||||
};
|
||||
|
||||
struct mlx5e_skb_cb_hwtstamp {
|
||||
ktime_t cqe_hwtstamp;
|
||||
@ -116,8 +124,7 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
|
||||
|
||||
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct mlx5e_port_ptp *c = container_of(napi, struct mlx5e_port_ptp,
|
||||
napi);
|
||||
struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
|
||||
struct mlx5e_ch_stats *ch_stats = c->stats;
|
||||
bool busy = false;
|
||||
int work_done = 0;
|
||||
@ -153,7 +160,7 @@ out:
|
||||
return work_done;
|
||||
}
|
||||
|
||||
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
|
||||
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_sq_param *param,
|
||||
struct mlx5e_txqsq *sq, int tc,
|
||||
@ -172,20 +179,18 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
|
||||
sq->netdev = c->netdev;
|
||||
sq->priv = c->priv;
|
||||
sq->mdev = mdev;
|
||||
sq->ch_ix = c->ix;
|
||||
sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
|
||||
sq->txq_ix = txq_ix;
|
||||
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
|
||||
sq->min_inline_mode = params->tx_min_inline_mode;
|
||||
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
|
||||
sq->stats = &c->priv->port_ptp_stats.sq[tc];
|
||||
sq->stats = &c->priv->ptp_stats.sq[tc];
|
||||
sq->ptpsq = ptpsq;
|
||||
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
|
||||
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
|
||||
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
|
||||
sq->stop_room = param->stop_room;
|
||||
sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
|
||||
mlx5_real_time_cyc2time :
|
||||
mlx5_timecounter_cyc2time;
|
||||
sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
|
||||
|
||||
node = dev_to_node(mlx5_core_dma_dev(mdev));
|
||||
|
||||
@ -243,7 +248,7 @@ static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
|
||||
kvfree(skb_fifo->fifo);
|
||||
}
|
||||
|
||||
static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
|
||||
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
|
||||
int txq_ix, struct mlx5e_ptp_params *cparams,
|
||||
int tc, struct mlx5e_ptpsq *ptpsq)
|
||||
{
|
||||
@ -293,7 +298,7 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
|
||||
mlx5e_free_txqsq(sq);
|
||||
}
|
||||
|
||||
static int mlx5e_ptp_open_txqsqs(struct mlx5e_port_ptp *c,
|
||||
static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
|
||||
struct mlx5e_ptp_params *cparams)
|
||||
{
|
||||
struct mlx5e_params *params = &cparams->params;
|
||||
@ -321,7 +326,7 @@ close_txqsq:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c)
|
||||
static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
|
||||
{
|
||||
int tc;
|
||||
|
||||
@ -329,7 +334,7 @@ static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c)
|
||||
mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
|
||||
}
|
||||
|
||||
static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
|
||||
static int mlx5e_ptp_open_cqs(struct mlx5e_ptp *c,
|
||||
struct mlx5e_ptp_params *cparams)
|
||||
{
|
||||
struct mlx5e_params *params = &cparams->params;
|
||||
@ -342,7 +347,7 @@ static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
|
||||
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
|
||||
ccp.ch_stats = c->stats;
|
||||
ccp.napi = &c->napi;
|
||||
ccp.ix = c->ix;
|
||||
ccp.ix = MLX5E_PTP_CHANNEL_IX;
|
||||
|
||||
cq_param = &cparams->txq_sq_param.cqp;
|
||||
|
||||
@ -362,7 +367,7 @@ static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
|
||||
if (err)
|
||||
goto out_err_ts_cq;
|
||||
|
||||
ptpsq->cq_stats = &c->priv->port_ptp_stats.cq[tc];
|
||||
ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -378,7 +383,7 @@ out_err_txqsq_cq:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c)
|
||||
static void mlx5e_ptp_close_cqs(struct mlx5e_ptp *c)
|
||||
{
|
||||
int tc;
|
||||
|
||||
@ -389,22 +394,22 @@ static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c)
|
||||
mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
|
||||
}
|
||||
|
||||
static void mlx5e_ptp_build_sq_param(struct mlx5e_priv *priv,
|
||||
static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_sq_param *param)
|
||||
{
|
||||
void *sqc = param->sqc;
|
||||
void *wq;
|
||||
|
||||
mlx5e_build_sq_param_common(priv, param);
|
||||
mlx5e_build_sq_param_common(mdev, param);
|
||||
|
||||
wq = MLX5_ADDR_OF(sqc, sqc, wq);
|
||||
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
|
||||
param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
|
||||
mlx5e_build_tx_cq_param(priv, params, ¶m->cqp);
|
||||
mlx5e_build_tx_cq_param(mdev, params, ¶m->cqp);
|
||||
}
|
||||
|
||||
static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c,
|
||||
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
|
||||
struct mlx5e_ptp_params *cparams,
|
||||
struct mlx5e_params *orig)
|
||||
{
|
||||
@ -419,10 +424,10 @@ static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c,
|
||||
/* SQ */
|
||||
params->log_sq_size = orig->log_sq_size;
|
||||
|
||||
mlx5e_ptp_build_sq_param(c->priv, params, &cparams->txq_sq_param);
|
||||
mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
|
||||
}
|
||||
|
||||
static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c,
|
||||
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
|
||||
struct mlx5e_ptp_params *cparams)
|
||||
{
|
||||
int err;
|
||||
@ -443,26 +448,21 @@ close_cqs:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c)
|
||||
static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
|
||||
{
|
||||
mlx5e_ptp_close_txqsqs(c);
|
||||
mlx5e_ptp_close_cqs(c);
|
||||
}
|
||||
|
||||
int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
|
||||
u8 lag_port, struct mlx5e_port_ptp **cp)
|
||||
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
|
||||
u8 lag_port, struct mlx5e_ptp **cp)
|
||||
{
|
||||
struct net_device *netdev = priv->netdev;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
struct mlx5e_ptp_params *cparams;
|
||||
struct mlx5e_port_ptp *c;
|
||||
unsigned int irq;
|
||||
struct mlx5e_ptp *c;
|
||||
int err;
|
||||
int eqn;
|
||||
|
||||
err = mlx5_vector2eqn(priv->mdev, 0, &eqn, &irq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
|
||||
cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
|
||||
@ -472,12 +472,11 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
|
||||
c->priv = priv;
|
||||
c->mdev = priv->mdev;
|
||||
c->tstamp = &priv->tstamp;
|
||||
c->ix = 0;
|
||||
c->pdev = mlx5_core_dma_dev(priv->mdev);
|
||||
c->netdev = priv->netdev;
|
||||
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
|
||||
c->num_tc = params->num_tc;
|
||||
c->stats = &priv->port_ptp_stats.ch;
|
||||
c->stats = &priv->ptp_stats.ch;
|
||||
c->lag_port = lag_port;
|
||||
|
||||
netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
|
||||
@ -502,7 +501,7 @@ err_napi_del:
|
||||
return err;
|
||||
}
|
||||
|
||||
void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c)
|
||||
void mlx5e_ptp_close(struct mlx5e_ptp *c)
|
||||
{
|
||||
mlx5e_ptp_close_queues(c);
|
||||
netif_napi_del(&c->napi);
|
||||
@ -510,7 +509,7 @@ void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c)
|
||||
kvfree(c);
|
||||
}
|
||||
|
||||
void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
|
||||
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
|
||||
{
|
||||
int tc;
|
||||
|
||||
@ -520,7 +519,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
|
||||
mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
|
||||
}
|
||||
|
||||
void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c)
|
||||
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
|
||||
{
|
||||
int tc;
|
||||
|
||||
|
@@ -5,7 +5,6 @@
#define __MLX5_EN_PTP_H__

#include "en.h"
#include "en/params.h"
#include "en_stats.h"

struct mlx5e_ptpsq {
@@ -17,7 +16,7 @@ struct mlx5e_ptpsq {
struct mlx5e_ptp_cq_stats *cq_stats;
};

struct mlx5e_port_ptp {
struct mlx5e_ptp {
/* data path */
struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC];
struct napi_struct napi;
@@ -34,20 +33,13 @@ struct mlx5e_port_ptp {
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
};

struct mlx5e_ptp_params {
struct mlx5e_params params;
struct mlx5e_sq_param txq_sq_param;
};

int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
u8 lag_port, struct mlx5e_port_ptp **cp);
void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c);
void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c);
void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c);
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
u8 lag_port, struct mlx5e_ptp **cp);
void mlx5e_ptp_close(struct mlx5e_ptp *c);
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c);
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c);

enum {
MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0),
@@ -232,8 +232,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs

memset(&param_sq, 0, sizeof(param_sq));
memset(&param_cq, 0, sizeof(param_cq));
mlx5e_build_sq_param(priv, params, &param_sq);
mlx5e_build_tx_cq_param(priv, params, &param_cq);
mlx5e_build_sq_param(priv->mdev, params, &param_sq);
mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
@@ -315,8 +315,8 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte
if (err)
return err;

generic_ptpsq = priv->channels.port_ptp ?
&priv->channels.port_ptp->ptpsq[0] :
generic_ptpsq = priv->channels.ptp ?
&priv->channels.ptp->ptpsq[0] :
NULL;
if (!generic_ptpsq)
goto out;
@@ -346,7 +346,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;

int i, tc, err = 0;

@@ -460,7 +460,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
int i, tc, err;
@ -30,172 +30,62 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
|
||||
return work_done;
|
||||
}
|
||||
|
||||
static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
|
||||
struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
|
||||
struct mlx5e_ch_stats *ch_stats,
|
||||
static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params,
|
||||
struct mlx5e_rq *rq)
|
||||
{
|
||||
void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
struct page_pool_params pp_params = {};
|
||||
int node = dev_to_node(mdev->device);
|
||||
u32 pool_size;
|
||||
int wq_sz;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
rqp->wq.db_numa_node = node;
|
||||
|
||||
rq->wq_type = params->rq_wq_type;
|
||||
rq->pdev = mdev->device;
|
||||
rq->netdev = priv->netdev;
|
||||
rq->mdev = mdev;
|
||||
rq->priv = priv;
|
||||
rq->stats = stats;
|
||||
rq->clock = &mdev->clock;
|
||||
rq->tstamp = &priv->tstamp;
|
||||
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
|
||||
struct mlx5_core_dev *mdev = t->mdev;
|
||||
struct mlx5e_priv *priv = t->priv;
|
||||
|
||||
rq->wq_type = params->rq_wq_type;
|
||||
rq->pdev = mdev->device;
|
||||
rq->netdev = priv->netdev;
|
||||
rq->priv = priv;
|
||||
rq->clock = &mdev->clock;
|
||||
rq->tstamp = &priv->tstamp;
|
||||
rq->mdev = mdev;
|
||||
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
|
||||
rq->stats = &priv->trap_stats.rq;
|
||||
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
|
||||
xdp_rxq_info_unused(&rq->xdp_rxq);
|
||||
|
||||
rq->buff.map_dir = DMA_FROM_DEVICE;
|
||||
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
|
||||
pool_size = 1 << params->log_rq_mtu_frames;
|
||||
|
||||
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
|
||||
|
||||
wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
|
||||
|
||||
rq->wqe.info = rqp->frags_info;
|
||||
rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
|
||||
rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
|
||||
(wq_sz << rq->wqe.info.log_num_frags)),
|
||||
GFP_KERNEL, node);
|
||||
if (!rq->wqe.frags) {
|
||||
err = -ENOMEM;
|
||||
goto err_wq_cyc_destroy;
|
||||
}
|
||||
|
||||
err = mlx5e_init_di_list(rq, wq_sz, node);
|
||||
if (err)
|
||||
goto err_free_frags;
|
||||
|
||||
rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
|
||||
|
||||
mlx5e_rq_set_trap_handlers(rq, params);
|
||||
|
||||
/* Create a page_pool and register it with rxq */
|
||||
pp_params.order = 0;
|
||||
pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
|
||||
pp_params.pool_size = pool_size;
|
||||
pp_params.nid = node;
|
||||
pp_params.dev = mdev->device;
|
||||
pp_params.dma_dir = rq->buff.map_dir;
|
||||
|
||||
/* page_pool can be used even when there is no rq->xdp_prog,
|
||||
* given page_pool does not handle DMA mapping there is no
|
||||
* required state to clear. And page_pool gracefully handle
|
||||
* elevated refcnt.
|
||||
*/
|
||||
rq->page_pool = page_pool_create(&pp_params);
|
||||
if (IS_ERR(rq->page_pool)) {
|
||||
err = PTR_ERR(rq->page_pool);
|
||||
rq->page_pool = NULL;
|
||||
goto err_free_di_list;
|
||||
}
|
||||
for (i = 0; i < wq_sz; i++) {
|
||||
struct mlx5e_rx_wqe_cyc *wqe =
|
||||
mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
|
||||
int f;
|
||||
|
||||
for (f = 0; f < rq->wqe.info.num_frags; f++) {
|
||||
u32 frag_size = rq->wqe.info.arr[f].frag_size |
|
||||
MLX5_HW_START_PADDING;
|
||||
|
||||
wqe->data[f].byte_count = cpu_to_be32(frag_size);
|
||||
wqe->data[f].lkey = rq->mkey_be;
|
||||
}
|
||||
/* check if num_frags is not a pow of two */
|
||||
if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
|
||||
wqe->data[f].byte_count = 0;
|
||||
wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
|
||||
wqe->data[f].addr = 0;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_free_di_list:
|
||||
mlx5e_free_di_list(rq);
|
||||
err_free_frags:
|
||||
kvfree(rq->wqe.frags);
|
||||
err_wq_cyc_destroy:
|
||||
mlx5_wq_destroy(&rq->wq_ctrl);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
|
||||
{
|
||||
page_pool_destroy(rq->page_pool);
|
||||
mlx5e_free_di_list(rq);
|
||||
kvfree(rq->wqe.frags);
|
||||
mlx5_wq_destroy(&rq->wq_ctrl);
|
||||
}
|
||||
|
||||
static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
|
||||
struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
|
||||
struct mlx5e_rq_param *rq_param,
|
||||
struct mlx5e_ch_stats *ch_stats,
|
||||
struct mlx5e_rq *rq)
|
||||
static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
|
||||
{
|
||||
struct mlx5e_rq_param *rq_param = &t->rq_param;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
struct mlx5e_create_cq_param ccp = {};
|
||||
struct dim_cq_moder trap_moder = {};
|
||||
struct mlx5e_cq *cq = &rq->cq;
|
||||
struct mlx5e_rq *rq = &t->rq;
|
||||
int node;
|
||||
int err;
|
||||
|
||||
ccp.node = dev_to_node(mdev->device);
|
||||
ccp.ch_stats = ch_stats;
|
||||
ccp.napi = napi;
|
||||
node = dev_to_node(mdev->device);
|
||||
|
||||
ccp.node = node;
|
||||
ccp.ch_stats = t->stats;
|
||||
ccp.napi = &t->napi;
|
||||
ccp.ix = 0;
|
||||
err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
|
||||
err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
|
||||
mlx5e_init_trap_rq(t, &t->params, rq);
|
||||
err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
|
||||
if (err)
|
||||
goto err_destroy_cq;
|
||||
|
||||
err = mlx5e_create_rq(rq, rq_param);
|
||||
if (err)
|
||||
goto err_free_rq;
|
||||
|
||||
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
|
||||
if (err)
|
||||
goto err_destroy_rq;
|
||||
|
||||
return 0;
|
||||
|
||||
err_destroy_rq:
|
||||
mlx5e_destroy_rq(rq);
|
||||
mlx5e_free_rx_descs(rq);
|
||||
err_free_rq:
|
||||
mlx5e_free_trap_rq(rq);
|
||||
err_destroy_cq:
|
||||
mlx5e_close_cq(cq);
|
||||
mlx5e_close_cq(&rq->cq);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
|
||||
{
|
||||
mlx5e_destroy_rq(rq);
|
||||
mlx5e_free_rx_descs(rq);
|
||||
mlx5e_free_trap_rq(rq);
|
||||
mlx5e_close_rq(rq);
|
||||
mlx5e_close_cq(&rq->cq);
|
||||
}
|
||||
|
||||
@ -228,24 +118,16 @@ static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct
|
||||
mlx5e_destroy_tir(mdev, tir);
|
||||
}
|
||||
|
||||
static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq)
|
||||
{
|
||||
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
|
||||
}
|
||||
|
||||
static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq)
|
||||
{
|
||||
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
|
||||
}
|
||||
|
||||
static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t)
|
||||
static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
|
||||
int max_mtu, u16 q_counter,
|
||||
struct mlx5e_trap *t)
|
||||
{
|
||||
struct mlx5e_params *params = &t->params;
|
||||
|
||||
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
|
||||
mlx5e_init_rq_type_params(priv->mdev, params);
|
||||
params->sw_mtu = priv->netdev->max_mtu;
|
||||
mlx5e_build_rq_param(priv, params, NULL, &t->rq_param);
|
||||
mlx5e_init_rq_type_params(mdev, params);
|
||||
params->sw_mtu = max_mtu;
|
||||
mlx5e_build_rq_param(mdev, params, NULL, q_counter, &t->rq_param);
|
||||
}
|
||||
|
||||
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
|
||||
@ -259,7 +141,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
|
||||
if (!t)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
mlx5e_build_trap_params(priv, t);
|
||||
mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, priv->q_counter, t);
|
||||
|
||||
t->priv = priv;
|
||||
t->mdev = priv->mdev;
|
||||
@ -271,11 +153,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
|
||||
|
||||
netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
|
||||
|
||||
err = mlx5e_open_trap_rq(priv, &t->napi,
|
||||
&priv->trap_stats.rq,
|
||||
&t->params, &t->rq_param,
|
||||
&priv->trap_stats.ch,
|
||||
&t->rq);
|
||||
err = mlx5e_open_trap_rq(priv, t);
|
||||
if (unlikely(err))
|
||||
goto err_napi_del;
|
||||
|
||||
@ -304,15 +182,14 @@ void mlx5e_close_trap(struct mlx5e_trap *trap)
|
||||
static void mlx5e_activate_trap(struct mlx5e_trap *trap)
|
||||
{
|
||||
napi_enable(&trap->napi);
|
||||
mlx5e_activate_trap_rq(&trap->rq);
|
||||
napi_schedule(&trap->napi);
|
||||
mlx5e_activate_rq(&trap->rq);
|
||||
}
|
||||
|
||||
void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_trap *trap = priv->en_trap;
|
||||
|
||||
mlx5e_deactivate_trap_rq(&trap->rq);
|
||||
mlx5e_deactivate_rq(&trap->rq);
|
||||
napi_disable(&trap->napi);
|
||||
}
|
||||
|
||||
|
@@ -35,13 +35,59 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
}
}

static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
u16 q_counter,
struct mlx5e_channel_param *cparam)
{
mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
}

static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct xsk_buff_pool *pool,
struct mlx5e_xsk_param *xsk,
struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = c->mdev;
int rq_xdp_ix;
int err;

rq->wq_type = params->rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
rq->clock = &mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->xsk_pool = pool;
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
err = mlx5e_rq_set_handlers(rq, params, xsk);
if (err)
return err;

return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
}

static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
struct mlx5e_xsk_param *xsk)
{
int err;

err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq);
if (err)
return err;

return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq);
}

int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
@@ -61,14 +107,14 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (!cparam)
return -ENOMEM;

mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);

err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
if (unlikely(err))
goto err_free_cparam;

err = mlx5e_open_rq(c, params, &cparam->rq, xsk, pool, &c->xskrq);
err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
if (unlikely(err))
goto err_close_rx_cq;
@@ -368,7 +368,7 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
new_channels.params.log_rq_mtu_frames = log_rq_size;
new_channels.params.log_sq_size = log_sq_size;

err = mlx5e_validate_params(priv, &new_channels.params);
err = mlx5e_validate_params(priv->mdev, &new_channels.params);
if (err)
goto unlock;

@@ -2032,7 +2032,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
mlx5e_num_channels_changed_ctx, NULL);
out:
if (!err)
priv->port_ptp_opened = true;
priv->tx_ptp_opened = true;

return err;
}
File diff suppressed because it is too large
@@ -40,6 +40,7 @@
#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en/params.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
@@ -752,6 +753,7 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_nch = priv->max_nch;
int err;

mlx5e_init_l2_addr(priv);
@@ -766,7 +768,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;

err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_rqts;

@@ -774,7 +776,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;

err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_tirs;

@@ -799,11 +801,11 @@ err_destroy_root_ft:
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -813,13 +815,15 @@ err_close_drop_rq:

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
u16 max_nch = priv->max_nch;

mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
}
@@ -407,13 +407,13 @@ static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
{
int i;

if (!priv->port_ptp_opened)
if (!priv->tx_ptp_opened)
return;

mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch);
mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

for (i = 0; i < priv->max_opened_tc; i++) {
mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]);
mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
barrier();
@@ -1851,7 +1851,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
return priv->port_ptp_opened ?
return priv->tx_ptp_opened ?
NUM_PTP_CH_STATS +
((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
0;
@@ -1861,7 +1861,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
int i, tc;

if (!priv->port_ptp_opened)
if (!priv->tx_ptp_opened)
return idx;

for (i = 0; i < NUM_PTP_CH_STATS; i++)
@@ -1884,24 +1884,24 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
int i, tc;

if (!priv->port_ptp_opened)
if (!priv->tx_ptp_opened)
return idx;

for (i = 0; i < NUM_PTP_CH_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch,
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
ptp_ch_stats_desc, i);

for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc],
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
ptp_sq_stats_desc, i);

for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc],
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
ptp_cq_stats_desc, i);

return idx;
@@ -447,11 +447,11 @@ static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)

static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
u32 *indirection_rqt, rqn;
struct mlx5e_priv *priv = hp->func_priv;
int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
u32 *indirection_rqt, rqn;

indirection_rqt = kzalloc(sz, GFP_KERNEL);
indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL);
if (!indirection_rqt)
return -ENOMEM;
@@ -142,7 +142,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq_ix;
}

if (unlikely(priv->channels.port_ptp))
if (unlikely(priv->channels.ptp))
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
mlx5e_use_ptpsq(skb))
return mlx5e_select_ptpsq(dev, skb);
@@ -340,7 +340,7 @@ static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
return -EIO;
}

mlx5_core_info(dev, "health revovery succeded\n");
mlx5_core_info(dev, "health recovery succeeded\n");
return 0;
}
@@ -33,6 +33,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"

#define IB_DEFAULT_Q_KEY 0xb1b
@@ -372,6 +373,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_nch = priv->max_nch;
int err;

mlx5e_create_q_counters(priv);
@@ -386,7 +388,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;

err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_rqts;

@@ -394,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;

err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
if (err)
goto err_destroy_indirect_tirs;

@@ -405,11 +407,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
return 0;

err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -421,10 +423,12 @@ err_destroy_q_counters:

static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
u16 max_nch = priv->max_nch;

mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -105,4 +105,15 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
}
#endif

static inline cqe_ts_to_ns mlx5_rq_ts_translator(struct mlx5_core_dev *mdev)
{
return mlx5_is_real_time_rq(mdev) ? mlx5_real_time_cyc2time :
mlx5_timecounter_cyc2time;
}

static inline cqe_ts_to_ns mlx5_sq_ts_translator(struct mlx5_core_dev *mdev)
{
return mlx5_is_real_time_sq(mdev) ? mlx5_real_time_cyc2time :
mlx5_timecounter_cyc2time;
}
#endif