mlx5-fixes-2019-07-25
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl06EYUACgkQSD+KveBX
+j7R4QgAht/C4115mi1Tc3d3zYjHp3SWLFxwK4vF0U2j30ouhsj1oaIP8bQdw6Mr
6hS4IZSdKNO5wo+NNqMnLYVtsAnvNGOuvYwUvMK5TDkdDb2lIzRlxihpWgTqWzXr
6Eh3nv5rTItgLMqxbLL1EE8Idlx3HQDJtU2a/AmxjmU/TqSKzbBTpnKIlRMPDFNC
PLWXjFXBR/XtcTbsnj7RtlD2HkDAERVTiMP2mlTvXjXxlN56YXCle4CWZamgH9H4
bTCrZwQHH9hllMAnAkq4gpHN7Z6/eXjV6jzu+BOE7ChOaEC5N2F+p5ARXqe+HwRL
apMYgRH5u4mzDt+1CbwR/I/pFOw3WA==
=NXce
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2019-07-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-07-25

This series introduces some fixes to the mlx5 driver.

1) Ariel addresses an issue with an encap flow counter race condition.
2) Aya fixes ethtool speed handling.
3) Edward fixes modify_cq hw bits alignment.
4) Maor fixes RDMA_RX capabilities handling.
5) Mark reverses the unregister devices order to address an issue with LAG.
6) From Tariq:
   - wrong max num channels indication regression
   - TLS counters naming and documentation, as suggested by Jakub
   - kTLS, call WARN_ONCE on netdev mismatch

There is one patch in this series that touches the nfp driver, to align
TLS statistics names with the latest documentation; Jakub is CC'ed.

Please pull and let me know if there is any problem.

For -stable v4.9:
  ('net/mlx5: Use reversed order when unregister devices')

For -stable v4.20:
  ('net/mlx5e: Prevent encap flow counter update async to user query')
  ('net/mlx5: Fix modify_cq_in alignment')

For -stable v5.1:
  ('net/mlx5e: Fix matching of speed to PRM link modes')

For -stable v5.2:
  ('net/mlx5: Add missing RDMA_RX capabilities')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0a062ba725
@@ -424,13 +424,24 @@ Statistics

 Following minimum set of TLS-related statistics should be reported
 by the driver:

-* ``rx_tls_decrypted`` - number of successfully decrypted TLS segments
-* ``tx_tls_encrypted`` - number of in-order TLS segments passed to device
-  for encryption
+* ``rx_tls_decrypted_packets`` - number of successfully decrypted RX packets
+  which were part of a TLS stream.
+* ``rx_tls_decrypted_bytes`` - number of TLS payload bytes in RX packets
+  which were successfully decrypted.
+* ``tx_tls_encrypted_packets`` - number of TX packets passed to the device
+  for encryption of their TLS payload.
+* ``tx_tls_encrypted_bytes`` - number of TLS payload bytes in TX packets
+  passed to the device for encryption.
+* ``tx_tls_ctx`` - number of TLS TX HW offload contexts added to device for
+  encryption.
 * ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream
-  but did not arrive in the expected order
-* ``tx_tls_drop_no_sync_data`` - number of TX packets dropped because
-  they arrived out of order and associated record could not be found
+  but did not arrive in the expected order.
+* ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of
+  a TLS stream dropped, because they arrived out of order and associated
+  record could not be found.
+* ``tx_tls_drop_bypass_req`` - number of TX packets which were part of a TLS
+  stream dropped, because they contain both data that has been encrypted by
+  software and data that expects hardware crypto offload.

 Notable corner cases, exceptions and additional requirements
 ============================================================
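For context, a minimal userspace-style sketch of how a driver could back the documented counter names for an ``ethtool -S``-like report. The struct, function and values below are illustrative only and are not taken from this patch set:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical per-device TLS offload counters, mirroring the documented names. */
    struct tls_offload_stats {
        uint64_t rx_tls_decrypted_packets; /* RX packets whose TLS payload was decrypted by HW */
        uint64_t rx_tls_decrypted_bytes;   /* TLS payload bytes decrypted by HW */
        uint64_t tx_tls_encrypted_packets; /* TX packets handed to HW for TLS encryption */
        uint64_t tx_tls_encrypted_bytes;   /* TLS payload bytes handed to HW for encryption */
        uint64_t tx_tls_ctx;               /* TX offload contexts installed in HW */
        uint64_t tx_tls_ooo;               /* TX packets that arrived out of the expected order */
        uint64_t tx_tls_drop_no_sync_data; /* dropped: record for resync could not be found */
        uint64_t tx_tls_drop_bypass_req;   /* dropped: mixed SW-encrypted and HW-offload data */
    };

    /* Print the counters the way an "ethtool -S"-like tool would list them. */
    static void dump_tls_stats(const struct tls_offload_stats *s)
    {
        printf("rx_tls_decrypted_packets: %llu\n", (unsigned long long)s->rx_tls_decrypted_packets);
        printf("rx_tls_decrypted_bytes: %llu\n", (unsigned long long)s->rx_tls_decrypted_bytes);
        printf("tx_tls_encrypted_packets: %llu\n", (unsigned long long)s->tx_tls_encrypted_packets);
        printf("tx_tls_encrypted_bytes: %llu\n", (unsigned long long)s->tx_tls_encrypted_bytes);
        printf("tx_tls_ctx: %llu\n", (unsigned long long)s->tx_tls_ctx);
        printf("tx_tls_ooo: %llu\n", (unsigned long long)s->tx_tls_ooo);
        printf("tx_tls_drop_no_sync_data: %llu\n", (unsigned long long)s->tx_tls_drop_no_sync_data);
        printf("tx_tls_drop_bypass_req: %llu\n", (unsigned long long)s->tx_tls_drop_bypass_req);
    }

    int main(void)
    {
        struct tls_offload_stats s = { .tx_tls_ctx = 2, .tx_tls_encrypted_packets = 1200 };
        dump_tls_stats(&s);
        return 0;
    }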
@@ -213,7 +213,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
        struct mlx5_interface *intf;

        mutex_lock(&mlx5_intf_mutex);
-       list_for_each_entry(intf, &intf_list, list)
+       list_for_each_entry_reverse(intf, &intf_list, list)
                mlx5_remove_device(intf, priv);
        list_del(&priv->dev_list);
        mutex_unlock(&mlx5_intf_mutex);
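The one-line change above makes device teardown walk the interface list in reverse, so interfaces registered last (which may depend on earlier ones, as in the LAG case mentioned in the cover letter) are removed first. A standalone sketch of that LIFO ordering, using plain arrays rather than the kernel list API:

    #include <stdio.h>

    /* Hypothetical interface names, registered in this order. */
    static const char *registered[] = { "core", "ethernet", "rdma" };

    int main(void)
    {
        int n = sizeof(registered) / sizeof(registered[0]);
        int i;

        /* Register in order: later entries may depend on earlier ones. */
        for (i = 0; i < n; i++)
            printf("register   %s\n", registered[i]);

        /* Unregister in reverse, so dependents go away before their providers. */
        for (i = n - 1; i >= 0; i--)
            printf("unregister %s\n", registered[i]);

        return 0;
    }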
@@ -159,7 +159,7 @@ do { \
 enum mlx5e_rq_group {
        MLX5E_RQ_GROUP_REGULAR,
        MLX5E_RQ_GROUP_XSK,
-       MLX5E_NUM_RQ_GROUPS /* Keep last. */
+#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
 };

 static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
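The removed enumerator was a single global count of RQ groups; the replacement macro derives a per-profile count from the highest group that profile uses (last group index plus one). A compile-and-run sketch of the same idea, with renamed identifiers:

    #include <stdio.h>

    enum rq_group {
        RQ_GROUP_REGULAR,
        RQ_GROUP_XSK,
    };

    /* Number of groups when the last group used is 'g' (its index plus one). */
    #define NUM_RQ_GROUPS(g) (1 + RQ_GROUP_##g)

    int main(void)
    {
        /* A profile without XSK needs one RQ group, one with XSK needs two. */
        printf("regular-only profile: %d group(s)\n", NUM_RQ_GROUPS(REGULAR)); /* 1 */
        printf("xsk-capable profile:  %d group(s)\n", NUM_RQ_GROUPS(XSK));     /* 2 */
        return 0;
    }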
@@ -182,14 +182,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
                min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
 }

-/* Use this function to get max num channels after netdev was created */
-static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
-{
-       return min_t(unsigned int,
-                    netdev->num_rx_queues / MLX5E_NUM_RQ_GROUPS,
-                    netdev->num_tx_queues);
-}
-
 struct mlx5e_tx_wqe {
        struct mlx5_wqe_ctrl_seg ctrl;
        struct mlx5_wqe_eth_seg  eth;
@@ -830,6 +822,7 @@ struct mlx5e_priv {
        struct net_device         *netdev;
        struct mlx5e_stats         stats;
        struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+       u16                        max_nch;
        u8                         max_opened_tc;
        struct hwtstamp_config     tstamp;
        u16                        q_counter;
@@ -871,6 +864,7 @@ struct mlx5e_profile {
                mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
        } rx_handlers;
        int     max_tc;
+       u8      rq_groups;
 };

 void mlx5e_build_ptys2ethtool_map(void);
@@ -66,9 +66,10 @@ static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params,
        *group = qid / nch;
 }

-static inline bool mlx5e_qid_validate(struct mlx5e_params *params, u64 qid)
+static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
+                                      struct mlx5e_params *params, u64 qid)
 {
-       return qid < params->num_channels * MLX5E_NUM_RQ_GROUPS;
+       return qid < params->num_channels * profile->rq_groups;
 }

 /* Parameter calculations */
@@ -78,9 +78,10 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
 };

 static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
-                                    const u32 **arr, u32 *size)
+                                    const u32 **arr, u32 *size,
+                                    bool force_legacy)
 {
-       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);

        *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
                      ARRAY_SIZE(mlx5e_link_speed);
@@ -152,7 +153,8 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
                            sizeof(out), MLX5_REG_PTYS, 0, 1);
 }

-u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
+u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+                         bool force_legacy)
 {
        unsigned long temp = eth_proto_oper;
        const u32 *table;
@@ -160,7 +162,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
        u32 max_size;
        int i;

-       mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+       mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
        i = find_first_bit(&temp, max_size);
        if (i < max_size)
                speed = table[i];
@@ -170,6 +172,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
 int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
 {
        struct mlx5e_port_eth_proto eproto;
+       bool force_legacy = false;
        bool ext;
        int err;

@@ -177,8 +180,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
        err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
        if (err)
                goto out;
-
-       *speed = mlx5e_port_ptys2speed(mdev, eproto.oper);
+       if (ext && !eproto.admin) {
+               force_legacy = true;
+               err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto);
+               if (err)
+                       goto out;
+       }
+       *speed = mlx5e_port_ptys2speed(mdev, eproto.oper, force_legacy);
        if (!(*speed))
                err = -EINVAL;

@@ -201,7 +209,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
        if (err)
                return err;

-       mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+       mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
        for (i = 0; i < max_size; ++i)
                if (eproto.cap & MLX5E_PROT_MASK(i))
                        max_speed = max(max_speed, table[i]);
@@ -210,14 +218,15 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
        return 0;
 }

-u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed)
+u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+                              bool force_legacy)
 {
        u32 link_modes = 0;
        const u32 *table;
        u32 max_size;
        int i;

-       mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+       mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
        for (i = 0; i < max_size; ++i) {
                if (table[i] == speed)
                        link_modes |= MLX5E_PROT_MASK(i);
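The common thread in the hunks above is the new force_legacy argument: every speed/link-mode translation now picks between the legacy and the extended PTYS table explicitly, instead of always preferring the extended one whenever the device supports it. A standalone sketch of that table selection, with made-up speed tables and bit layouts:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative speed tables indexed by protocol bit, values in Mb/s. */
    static const uint32_t legacy_speed[] = { 1000, 10000, 25000, 40000, 100000 };
    static const uint32_t ext_speed[]    = { 1000, 10000, 25000, 50000, 100000, 200000 };

    static void get_speed_arr(bool ext_cap, bool force_legacy,
                              const uint32_t **arr, size_t *size)
    {
        /* Extended table only when the device supports it and legacy is not forced. */
        bool ext = force_legacy ? false : ext_cap;

        *arr = ext ? ext_speed : legacy_speed;
        *size = ext ? sizeof(ext_speed) / sizeof(ext_speed[0])
                    : sizeof(legacy_speed) / sizeof(legacy_speed[0]);
    }

    /* Map an operational protocol bitmask to a speed via the selected table. */
    static uint32_t proto2speed(uint32_t proto_oper, bool ext_cap, bool force_legacy)
    {
        const uint32_t *table;
        size_t size, i;

        get_speed_arr(ext_cap, force_legacy, &table, &size);
        for (i = 0; i < size; i++)
            if (proto_oper & (1u << i))
                return table[i];
        return 0;
    }

    int main(void)
    {
        /* Bit 3 means 40G in the legacy table but 50G in the extended one. */
        printf("ext lookup:    %u Mb/s\n", proto2speed(1u << 3, true, false));
        printf("legacy lookup: %u Mb/s\n", proto2speed(1u << 3, true, true));
        return 0;
    }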
|
@ -48,10 +48,12 @@ void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
|
||||
u8 *an_disable_cap, u8 *an_disable_admin);
|
||||
int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
|
||||
u32 proto_admin, bool ext);
|
||||
u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper);
|
||||
u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
|
||||
bool force_legacy);
|
||||
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
|
||||
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
|
||||
u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed);
|
||||
u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
|
||||
bool force_legacy);
|
||||
|
||||
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
|
||||
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
|
||||
|
@ -412,7 +412,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
|
||||
goto out;
|
||||
|
||||
tls_ctx = tls_get_ctx(skb->sk);
|
||||
if (unlikely(tls_ctx->netdev != netdev))
|
||||
if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev)))
|
||||
goto err_out;
|
||||
|
||||
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
|
||||
|
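The kTLS change keeps the existing error path but also warns, once per call site, when the TLS context points at a different netdev than the one transmitting; WARN_ON_ONCE() evaluates to the condition's truth value, which is why it can sit directly inside the if. A userspace approximation of that pattern (the macro below is a simplified stand-in, not the kernel implementation):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel macro: print once per call site, return the condition. */
    #define WARN_ON_ONCE_SIM(cond) ({                          \
        static bool __warned;                                  \
        bool __c = (cond);                                     \
        if (__c && !__warned) {                                \
            __warned = true;                                   \
            fprintf(stderr, "WARNING: %s\n", #cond);           \
        }                                                      \
        __c;                                                   \
    })

    static int handle_skb(const void *ctx_netdev, const void *tx_netdev)
    {
        /* Warn on the mismatch and still bail out of the fast path. */
        if (WARN_ON_ONCE_SIM(ctx_netdev != tx_netdev))
            return -1;
        return 0;
    }

    int main(void)
    {
        int a, b;

        printf("mismatch: %d\n", handle_skb(&a, &b)); /* warns once, returns -1 */
        printf("mismatch: %d\n", handle_skb(&a, &b)); /* silent, still returns -1 */
        printf("match:    %d\n", handle_skb(&a, &a)); /* returns 0 */
        return 0;
    }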
@@ -391,7 +391,7 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
 {
        mutex_lock(&priv->state_lock);

-       ch->max_combined   = mlx5e_get_netdev_max_channels(priv->netdev);
+       ch->max_combined   = priv->max_nch;
        ch->combined_count = priv->channels.params.num_channels;
        if (priv->xsk.refcnt) {
                /* The upper half are XSK queues. */
@@ -785,7 +785,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
 }

 static void get_speed_duplex(struct net_device *netdev,
-                            u32 eth_proto_oper,
+                            u32 eth_proto_oper, bool force_legacy,
                             struct ethtool_link_ksettings *link_ksettings)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -795,7 +795,7 @@ static void get_speed_duplex(struct net_device *netdev,
        if (!netif_carrier_ok(netdev))
                goto out;

-       speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper);
+       speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
        if (!speed) {
                speed = SPEED_UNKNOWN;
                goto out;
@@ -914,8 +914,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        /* Fields: eth_proto_admin and ext_eth_proto_admin are
         * mutually exclusive. Hence try reading legacy advertising
         * when extended advertising is zero.
-        * admin_ext indicates how eth_proto_admin should be
-        * interpreted
+        * admin_ext indicates which proto_admin (ext vs. legacy)
+        * should be read and interpreted
         */
        admin_ext = ext;
        if (ext && !eth_proto_admin) {
@@ -924,7 +924,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                admin_ext = false;
        }

-       eth_proto_oper      = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
+       eth_proto_oper      = MLX5_GET_ETH_PROTO(ptys_reg, out, admin_ext,
                                                 eth_proto_oper);
        eth_proto_lp        = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
        an_disable_admin    = MLX5_GET(ptys_reg, out, an_disable_admin);
@@ -939,7 +939,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        get_supported(mdev, eth_proto_cap, link_ksettings);
        get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
                        admin_ext);
-       get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
+       get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
+                        link_ksettings);

        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;

@@ -1016,45 +1017,69 @@ static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
        return ptys_modes;
 }

+static bool ext_link_mode_requested(const unsigned long *adver)
+{
+#define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
+       int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+
+       bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
+       return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static bool ext_speed_requested(u32 speed)
+{
+#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000
+       return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED);
+}
+
+static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed)
+{
+       bool ext_link_mode = ext_link_mode_requested(adver);
+       bool ext_speed = ext_speed_requested(speed);
+
+       return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed;
+}
+
 int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
                                     const struct ethtool_link_ksettings *link_ksettings)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_port_eth_proto eproto;
+       const unsigned long *adver;
        bool an_changes = false;
        u8 an_disable_admin;
        bool ext_supported;
-       bool ext_requested;
        u8 an_disable_cap;
        bool an_disable;
        u32 link_modes;
        u8 an_status;
+       u8 autoneg;
        u32 speed;
+       bool ext;
        int err;

        u32 (*ethtool2ptys_adver_func)(const unsigned long *adver);

-#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
-
-       ext_requested = !!(link_ksettings->link_modes.advertising[0] >
-                          MLX5E_PTYS_EXT ||
-                          link_ksettings->link_modes.advertising[1]);
-       ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-       ext_requested &= ext_supported;
+       adver = link_ksettings->link_modes.advertising;
+       autoneg = link_ksettings->base.autoneg;
+       speed = link_ksettings->base.speed;

-       speed = link_ksettings->base.speed;
-       ethtool2ptys_adver_func = ext_requested ?
-                                 mlx5e_ethtool2ptys_ext_adver_link :
+       ext = ext_requested(autoneg, adver, speed),
+       ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       if (!ext_supported && ext)
+               return -EOPNOTSUPP;
+
+       ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
                                  mlx5e_ethtool2ptys_adver_link;
-       err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
+       err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
        if (err) {
                netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
                           __func__, err);
                goto out;
        }
-       link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
-               ethtool2ptys_adver_func(link_ksettings->link_modes.advertising) :
-               mlx5e_port_speed2linkmodes(mdev, speed);
+       link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
+               mlx5e_port_speed2linkmodes(mdev, speed, !ext);

        link_modes = link_modes & eproto.cap;
        if (!link_modes) {
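The new ext_requested() helpers decide whether the user's request targets the extended PTYS namespace: with autoneg enabled, the advertised mask is checked for any bit at or above the first extended-only link mode; with autoneg disabled, the forced speed is compared against the highest speed the legacy modes can express. A small sketch of that test using plain 64-bit words instead of the kernel bitmap API (the bit position and speed limit below are assumptions for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FIRST_EXT_MODE_BIT  52      /* assumed first extended-only link-mode bit */
    #define MAX_LEGACY_SPEED    100000  /* Mb/s expressible via legacy modes (assumed) */

    /* True if any advertised bit falls at or above FIRST_EXT_MODE_BIT. */
    static bool ext_link_mode_requested(const uint64_t adver[2])
    {
        uint64_t hi_mask_word0 = ~0ULL << FIRST_EXT_MODE_BIT; /* bits 52..63 of word 0 */

        return (adver[0] & hi_mask_word0) || adver[1];
    }

    static bool ext_speed_requested(uint32_t speed)
    {
        return speed > MAX_LEGACY_SPEED;
    }

    static bool ext_requested(bool autoneg, const uint64_t adver[2], uint32_t speed)
    {
        return autoneg ? ext_link_mode_requested(adver) : ext_speed_requested(speed);
    }

    int main(void)
    {
        uint64_t legacy_adv[2] = { 1ULL << 10, 0 };  /* a low, legacy-range bit */
        uint64_t ext_adv[2]    = { 0, 1ULL << 3 };   /* a bit beyond word 0 */

        printf("autoneg, legacy advert -> ext? %d\n", ext_requested(true, legacy_adv, 0));
        printf("autoneg, ext advert    -> ext? %d\n", ext_requested(true, ext_adv, 0));
        printf("forced 200000 Mb/s     -> ext? %d\n", ext_requested(false, legacy_adv, 200000));
        return 0;
    }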
@@ -1067,14 +1092,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
                                    &an_disable_admin);

-       an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
+       an_disable = autoneg == AUTONEG_DISABLE;
        an_changes = ((!an_disable && an_disable_admin) ||
                      (an_disable && !an_disable_admin));

        if (!an_changes && link_modes == eproto.admin)
                goto out;

-       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
+       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
        mlx5_toggle_port_link(mdev);

 out:
@@ -611,7 +611,8 @@ static int validate_flow(struct mlx5e_priv *priv,
                return -ENOSPC;

        if (fs->ring_cookie != RX_CLS_FLOW_DISC)
-               if (!mlx5e_qid_validate(&priv->channels.params, fs->ring_cookie))
+               if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
+                                       fs->ring_cookie))
                        return -EINVAL;

        switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
@@ -1677,10 +1677,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
 {
        struct mlx5e_priv *priv = c->priv;
-       int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
+       int err, tc;

        for (tc = 0; tc < params->num_tc; tc++) {
-               int txq_ix = c->ix + tc * max_nch;
+               int txq_ix = c->ix + tc * priv->max_nch;

                err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
                                       params, &cparam->sq, &c->sq[tc], tc);
@@ -2438,11 +2438,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)

 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
 {
-       const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
        int err;
        int ix;

-       for (ix = 0; ix < max_nch; ix++) {
+       for (ix = 0; ix < priv->max_nch; ix++) {
                err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
                if (unlikely(err))
                        goto err_destroy_rqts;
@@ -2460,10 +2459,9 @@ err_destroy_rqts:

 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
 {
-       const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
        int i;

-       for (i = 0; i < max_nch; i++)
+       for (i = 0; i < priv->max_nch; i++)
                mlx5e_destroy_rqt(priv, &tirs[i].rqt);
 }

@@ -2557,7 +2555,7 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
                mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
        }

-       for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
+       for (ix = 0; ix < priv->max_nch; ix++) {
                struct mlx5e_redirect_rqt_param direct_rrp = {
                        .is_rss = false,
                        {
@@ -2758,7 +2756,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
                        goto free_in;
        }

-       for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
+       for (ix = 0; ix < priv->max_nch; ix++) {
                err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
                                           in, inlen);
                if (err)
@@ -2858,12 +2856,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)

 static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
 {
-       int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
        int i, tc;

-       for (i = 0; i < max_nch; i++)
+       for (i = 0; i < priv->max_nch; i++)
                for (tc = 0; tc < priv->profile->max_tc; tc++)
-                       priv->channel_tc2txq[i][tc] = i + tc * max_nch;
+                       priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch;
 }

 static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
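With max_nch cached on the priv struct, the TC-to-TXQ layout itself is unchanged: TXQs for a given traffic class are grouped together, so txq = channel + tc * max_nch. A worked example of that indexing with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
        int max_nch = 4;   /* illustrative channel count */
        int num_tc = 2;    /* illustrative number of traffic classes */
        int ch, tc;

        /* TXQs for a given TC are grouped together, offset by tc * max_nch. */
        for (tc = 0; tc < num_tc; tc++)
            for (ch = 0; ch < max_nch; ch++)
                printf("channel %d, tc %d -> txq %d\n", ch, tc, ch + tc * max_nch);

        return 0;
    }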
@@ -2884,7 +2881,7 @@ static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 {
        int num_txqs = priv->channels.num * priv->channels.params.num_tc;
-       int num_rxqs = priv->channels.num * MLX5E_NUM_RQ_GROUPS;
+       int num_rxqs = priv->channels.num * priv->profile->rq_groups;
        struct net_device *netdev = priv->netdev;

        mlx5e_netdev_set_tcs(netdev);
@@ -3306,7 +3303,6 @@ err_destroy_inner_tirs:

 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
 {
-       const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
        struct mlx5e_tir *tir;
        void *tirc;
        int inlen;
@@ -3319,7 +3315,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
        if (!in)
                return -ENOMEM;

-       for (ix = 0; ix < max_nch; ix++) {
+       for (ix = 0; ix < priv->max_nch; ix++) {
                memset(in, 0, inlen);
                tir = &tirs[ix];
                tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
@@ -3358,10 +3354,9 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)

 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
 {
-       const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
        int i;

-       for (i = 0; i < max_nch; i++)
+       for (i = 0; i < priv->max_nch; i++)
                mlx5e_destroy_tir(priv->mdev, &tirs[i]);
 }

@@ -3487,7 +3482,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
 {
        int i;

-       for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
+       for (i = 0; i < priv->max_nch; i++) {
                struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
                struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
                struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
@@ -4960,8 +4955,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
                return err;

        mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
-                              mlx5e_get_netdev_max_channels(netdev),
-                              netdev->mtu);
+                              priv->max_nch, netdev->mtu);

        mlx5e_timestamp_init(priv);

@@ -5164,6 +5158,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc            = MLX5E_MAX_NUM_TC,
+       .rq_groups         = MLX5E_NUM_RQ_GROUPS(XSK),
 };

 /* mlx5e generic netdev management API (move to en_common.c) */
@@ -5181,6 +5176,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
        priv->profile     = profile;
        priv->ppriv       = ppriv;
        priv->msglevel    = MLX5E_MSG_LEVEL;
+       priv->max_nch     = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
        priv->max_opened_tc = 1;

        mutex_init(&priv->state_lock);
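Here max_nch is computed once at netdev init from the number of RX queues and the profile's RQ-group count (clamped to at least one group), rather than being re-derived later from netdev queue counts with a fixed group count. A small numeric sketch of that division:

    #include <stdio.h>

    static int max_nch(int num_rx_queues, int rq_groups)
    {
        /* Guard against a zero group count, as max_t(u8, rq_groups, 1) does. */
        if (rq_groups < 1)
            rq_groups = 1;
        return num_rx_queues / rq_groups;
    }

    int main(void)
    {
        /* 32 RX queues allocated at netdev creation time (illustrative). */
        printf("XSK-capable profile (2 groups): %d channels\n", max_nch(32, 2)); /* 16 */
        printf("regular-only profile (1 group): %d channels\n", max_nch(32, 1)); /* 32 */
        return 0;
    }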
@@ -5218,7 +5214,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,

        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
                                    nch * profile->max_tc,
-                                   nch * MLX5E_NUM_RQ_GROUPS);
+                                   nch * profile->rq_groups);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
@@ -1701,6 +1701,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc                 = 1,
+       .rq_groups              = MLX5E_NUM_RQ_GROUPS(REGULAR),
 };

 static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1718,6 +1719,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc                 = MLX5E_MAX_NUM_TC,
+       .rq_groups              = MLX5E_NUM_RQ_GROUPS(REGULAR),
 };

 static bool
@@ -172,7 +172,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)

        memset(s, 0, sizeof(*s));

-       for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
+       for (i = 0; i < priv->max_nch; i++) {
                struct mlx5e_channel_stats *channel_stats =
                        &priv->channel_stats[i];
                struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
@@ -1395,7 +1395,7 @@ static const struct counter_desc ch_stats_desc[] = {

 static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
 {
-       int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
+       int max_nch = priv->max_nch;

        return (NUM_RQ_STATS * max_nch) +
               (NUM_CH_STATS * max_nch) +
@@ -1409,8 +1409,8 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
 static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                           int idx)
 {
-       int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
        bool is_xsk = priv->xsk.ever_used;
+       int max_nch = priv->max_nch;
        int i, j, tc;

        for (i = 0; i < max_nch; i++)
@@ -1452,8 +1452,8 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                         int idx)
 {
-       int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
        bool is_xsk = priv->xsk.ever_used;
+       int max_nch = priv->max_nch;
        int i, j, tc;

        for (i = 0; i < max_nch; i++)
@@ -1230,13 +1230,13 @@ static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 {
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
-       u64 bytes, packets, lastuse = 0;
        struct mlx5e_tc_flow *flow;
        struct mlx5e_encap_entry *e;
        struct mlx5_fc *counter;
        struct neigh_table *tbl;
        bool neigh_used = false;
        struct neighbour *n;
+       u64 lastuse;

        if (m_neigh->family == AF_INET)
                tbl = &arp_tbl;
@@ -1256,7 +1256,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
                                                           encaps[efi->index]);
                        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                                counter = mlx5e_tc_get_counter(flow);
-                               mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+                               lastuse = mlx5_fc_query_lastuse(counter);
                                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                                        neigh_used = true;
                                        break;
@@ -68,7 +68,7 @@ enum fs_flow_table_type {
        FS_FT_SNIFFER_RX        = 0X5,
        FS_FT_SNIFFER_TX        = 0X6,
        FS_FT_RDMA_RX           = 0X7,
-       FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
+       FS_FT_MAX_TYPE = FS_FT_RDMA_RX,
 };

 enum fs_flow_table_op_mod {
@@ -275,7 +275,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
        (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) :           \
        (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
        (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
-       (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
+       (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) :       \
+       (BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE))\
        )

 #endif
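The final arm of this capability macro is a compile-time tripwire: BUILD_BUG_ON_ZERO() breaks the build if someone extends the flow-table-type enum without also extending the macro. A standalone sketch of the same guard, using the usual sizeof-of-a-negative-width-bitfield trick and invented names:

    #include <stdio.h>

    /* Evaluates to 0, or fails to compile if 'e' is true (bitfield width would be negative). */
    #define BUILD_BUG_ON_ZERO_SIM(e) ((int)(sizeof(struct { int : (-!!(e)); })))

    enum ft_type {
        FT_NIC_RX,
        FT_SNIFFER_TX,
        FT_RDMA_RX,
        FT_MAX_TYPE = FT_RDMA_RX,
    };

    /* The last handled type must equal FT_MAX_TYPE, or the build breaks right here. */
    #define FT_CAP(type)                                     \
        ((type) == FT_NIC_RX ? 1 :                           \
         (type) == FT_SNIFFER_TX ? 2 :                       \
         (type) == FT_RDMA_RX ? 3 :                          \
         BUILD_BUG_ON_ZERO_SIM(FT_RDMA_RX != FT_MAX_TYPE))

    int main(void)
    {
        printf("cap for RDMA RX table: %d\n", FT_CAP(FT_RDMA_RX));
        return 0;
    }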
@@ -369,6 +369,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
 }
 EXPORT_SYMBOL(mlx5_fc_query);

+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
+{
+       return counter->cache.lastuse;
+}
+
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
                          u64 *bytes, u64 *packets, u64 *lastuse)
 {
@@ -88,8 +88,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
        netdev->mtu = netdev->max_mtu;

        mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
-                              mlx5e_get_netdev_max_channels(netdev),
-                              netdev->mtu);
+                              priv->max_nch, netdev->mtu);
        mlx5i_build_nic_params(mdev, &priv->channels.params);

        mlx5e_timestamp_init(priv);
@@ -118,11 +117,10 @@ void mlx5i_cleanup(struct mlx5e_priv *priv)

 static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
 {
-       int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
        struct mlx5e_sw_stats s = { 0 };
        int i, j;

-       for (i = 0; i < max_nch; i++) {
+       for (i = 0; i < priv->max_nch; i++) {
                struct mlx5e_channel_stats *channel_stats;
                struct mlx5e_rq_stats *rq_stats;

@@ -436,6 +434,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
        .rx_handlers.handle_rx_cqe       = mlx5i_handle_rx_cqe,
        .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
        .max_tc            = MLX5I_MAX_NUM_TC,
+       .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
 };

 /* mlx5i netdev NDos */
@@ -355,6 +355,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
        .rx_handlers.handle_rx_cqe       = mlx5i_handle_rx_cqe,
        .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
        .max_tc            = MLX5I_MAX_NUM_TC,
+       .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
 };

 const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
@@ -444,12 +444,12 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
        data = nfp_pr_et(data, "hw_rx_csum_complete");
        data = nfp_pr_et(data, "hw_rx_csum_err");
        data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
-       data = nfp_pr_et(data, "rx_tls_decrypted");
+       data = nfp_pr_et(data, "rx_tls_decrypted_packets");
        data = nfp_pr_et(data, "hw_tx_csum");
        data = nfp_pr_et(data, "hw_tx_inner_csum");
        data = nfp_pr_et(data, "tx_gather");
        data = nfp_pr_et(data, "tx_lso");
-       data = nfp_pr_et(data, "tx_tls_encrypted");
+       data = nfp_pr_et(data, "tx_tls_encrypted_packets");
        data = nfp_pr_et(data, "tx_tls_ooo");
        data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");

@@ -220,6 +220,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,

 struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
                          u64 *bytes, u64 *packets, u64 *lastuse);
 int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
@@ -5975,10 +5975,12 @@ struct mlx5_ifc_modify_cq_in_bits {

        struct mlx5_ifc_cqc_bits cq_context;

-       u8         reserved_at_280[0x40];
+       u8         reserved_at_280[0x60];

        u8         cq_umem_valid[0x1];
-       u8         reserved_at_2c1[0x5bf];
+       u8         reserved_at_2e1[0x1f];
+
+       u8         reserved_at_300[0x580];

        u8         pas[0][0x40];
 };