mlx5-fixes-2022-02-23

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmIWzHMACgkQSD+KveBX
 +j7LwwgAj1T1YcCWcY5sBKbBUU08YM/7fMDewo5KZ+dMI25NBA2spRJ4B5gsFR+K
 e7QrGRSX43HPeGlAS7xBikkzyhKckqm05GNvUCqSiIR5BB0ddpV01JAF6U/zHxTG
 dN/h/k9STUZBKOMANwTNt9lM1q5rfqSyDkFEspeumzfpiIfnoYea7gX5iLRxhvG1
 sfn7uBem9hRIhywShGRgyh2+huzrsVm0K8rGksunqSqvxVGtIF1XBLWqzfd3mnlY
 TYBXBZlJGawqavHF9fgjTLvd9PECUggv6mJzkLbueRDcA1qzyVw3VDE6iGfIqOR4
 R2ROzFV5SbVP5Q/E6wLfLhZjqImTDw==
 =/0bX
 -----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2022-02-23' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-02-23

This series provides bug fixes to the mlx5 driver.
Please pull and let me know if there is any problem.

* tag 'mlx5-fixes-2022-02-23' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Fix VF min/max rate parameters interchange mistake
  net/mlx5e: Add missing increment of count
  net/mlx5e: MPLSoUDP decap, fix check for unsupported matches
  net/mlx5e: Fix MPLSoUDP encap to use MPLS action information
  net/mlx5e: Add feature check for set fec counters
  net/mlx5e: TC, Skip redundant ct clear actions
  net/mlx5e: TC, Reject rules with forward and drop actions
  net/mlx5e: TC, Reject rules with drop and modify hdr action
  net/mlx5e: kTLS, Use CHECKSUM_UNNECESSARY for device-offloaded packets
  net/mlx5e: Fix wrong return value on ioctl EEPROM query failure
  net/mlx5: Fix possible deadlock on rule deletion
  net/mlx5: Fix tc max supported prio for nic mode
  net/mlx5: Fix wrong limitation of metadata match on ecpf
  net/mlx5: Update log_max_qp value to be 17 at most
  net/mlx5: DR, Fix the threshold that defines when pool sync is initiated
  net/mlx5: DR, Don't allow match on IP w/o matching on full ethertype/ip_version
  net/mlx5: DR, Fix slab-out-of-bounds in mlx5_cmd_dr_create_fte
  net/mlx5: DR, Cache STE shadow memory
  net/mlx5: Update the list of the PCI supported devices
====================

Link: https://lore.kernel.org/r/20220224001123.365265-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 5facf49702
24 changed files with 236 additions and 94 deletions


@@ -16,11 +16,13 @@ struct mlx5e_tc_act_parse_state {
 	unsigned int num_actions;
 	struct mlx5e_tc_flow *flow;
 	struct netlink_ext_ack *extack;
+	bool ct_clear;
 	bool encap;
 	bool decap;
 	bool mpls_push;
 	bool ptype_host;
 	const struct ip_tunnel_info *tun_info;
+	struct mlx5e_mpls_info mpls_info;
 	struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
 	int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
 	int if_count;


@@ -27,8 +27,13 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
 		struct mlx5e_priv *priv,
 		struct mlx5_flow_attr *attr)
 {
+	bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
 	int err;
 
+	/* It's redundant to do ct clear more than once. */
+	if (clear_action && parse_state->ct_clear)
+		return 0;
+
 	err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
 				      &attr->parse_attr->mod_hdr_acts,
 				      act, parse_state->extack);
@@ -40,6 +45,8 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
 	if (mlx5e_is_eswitch_flow(parse_state->flow))
 		attr->esw_attr->split_count = attr->esw_attr->out_count;
 
+	parse_state->ct_clear = clear_action;
+
 	return 0;
 }


@@ -177,6 +177,12 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
 		return -ENOMEM;
 
 	parse_state->encap = false;
+
+	if (parse_state->mpls_push) {
+		memcpy(&parse_attr->mpls_info[esw_attr->out_count],
+		       &parse_state->mpls_info, sizeof(parse_state->mpls_info));
+		parse_state->mpls_push = false;
+	}
 	esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
 	esw_attr->out_count++;
 	/* attr->dests[].rep is resolved when we handle encap */


@@ -22,6 +22,16 @@ tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
 	return true;
 }
 
+static void
+copy_mpls_info(struct mlx5e_mpls_info *mpls_info,
+	       const struct flow_action_entry *act)
+{
+	mpls_info->label = act->mpls_push.label;
+	mpls_info->tc = act->mpls_push.tc;
+	mpls_info->bos = act->mpls_push.bos;
+	mpls_info->ttl = act->mpls_push.ttl;
+}
+
 static int
 tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
 		       const struct flow_action_entry *act,
@@ -29,6 +39,7 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
 		       struct mlx5_flow_attr *attr)
 {
 	parse_state->mpls_push = true;
+	copy_mpls_info(&parse_state->mpls_info, act);
 
 	return 0;
 }


@@ -35,6 +35,7 @@ enum {
 struct mlx5e_tc_flow_parse_attr {
 	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+	struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct net_device *filter_dev;
 	struct mlx5_flow_spec spec;
 	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;


@@ -750,6 +750,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 	struct mlx5_flow_attr *attr = flow->attr;
 	const struct ip_tunnel_info *tun_info;
+	const struct mlx5e_mpls_info *mpls_info;
 	unsigned long tbl_time_before = 0;
 	struct mlx5e_encap_entry *e;
 	struct mlx5e_encap_key key;
@@ -760,6 +761,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	parse_attr = attr->parse_attr;
 	tun_info = parse_attr->tun_info[out_index];
+	mpls_info = &parse_attr->mpls_info[out_index];
 	family = ip_tunnel_info_af(tun_info);
 	key.ip_tun_key = &tun_info->key;
 	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
@@ -810,6 +812,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
 		goto out_err_init;
 	}
 	e->tun_info = tun_info;
+	memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info));
 	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
 	if (err)
 		goto out_err_init;


@@ -30,16 +30,15 @@ static int generate_ip_tun_hdr(char buf[],
 			       struct mlx5e_encap_entry *r)
 {
 	const struct ip_tunnel_key *tun_key = &r->tun_info->key;
+	const struct mlx5e_mpls_info *mpls_info = &r->mpls_info;
 	struct udphdr *udp = (struct udphdr *)(buf);
 	struct mpls_shim_hdr *mpls;
-	u32 tun_id;
 
-	tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id));
 	mpls = (struct mpls_shim_hdr *)(udp + 1);
 	*ip_proto = IPPROTO_UDP;
 
 	udp->dest = tun_key->tp_dst;
-	*mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true);
+	*mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos);
 
 	return 0;
 }
@@ -60,37 +59,31 @@ static int parse_tunnel(struct mlx5e_priv *priv,
 			void *headers_v)
 {
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-	struct flow_match_enc_keyid enc_keyid;
 	struct flow_match_mpls match;
 	void *misc2_c;
 	void *misc2_v;
 
-	misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-			       misc_parameters_2);
-	misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-			       misc_parameters_2);
-
-	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
-		return 0;
-
-	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
-		return 0;
-
-	flow_rule_match_enc_keyid(rule, &enc_keyid);
-
-	if (!enc_keyid.mask->keyid)
-		return 0;
-
 	if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
 	    !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
 		return -EOPNOTSUPP;
 
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+		return -EOPNOTSUPP;
+
+	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
+		return 0;
+
 	flow_rule_match_mpls(rule, &match);
 
 	/* Only support matching the first LSE */
 	if (match.mask->used_lses != 1)
 		return -EOPNOTSUPP;
 
+	misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+			       misc_parameters_2);
+	misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+			       misc_parameters_2);
+
 	MLX5_SET(fte_match_set_misc2, misc2_c,
 		 outer_first_mpls_over_udp.mpls_label,
 		 match.mask->ls[0].mpls_label);
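
The encap half of the fix matters because an MPLS label stack entry packs the
four mpls_push action fields into a single 32-bit word, so encoding tunnel-key
values (tun_id, ttl, tos) instead of the action's label/tc/bos/ttl produced a
wrong header on the wire. A standalone sketch of the RFC 3032 layout
(label:20, tc:3, bos:1, ttl:8); the values are hypothetical and the helper
mirrors what the kernel's mpls_entry_encode() computes rather than reusing it:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* RFC 3032 label stack entry: label(20) | tc(3) | bos(1) | ttl(8). */
static uint32_t mpls_lse_encode(uint32_t label, uint8_t tc, uint8_t bos, uint8_t ttl)
{
	return htonl((label & 0xfffff) << 12 | (uint32_t)(tc & 7) << 9 |
		     (uint32_t)(bos & 1) << 8 | ttl);
}

int main(void)
{
	/* hypothetical values taken from a tc mpls_push action */
	printf("lse=0x%08x\n", ntohl(mpls_lse_encode(16, 0, 1, 64)));	/* 0x00010140 */
	return 0;
}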


@@ -1792,7 +1792,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
 		if (size_read < 0) {
 			netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
 				   __func__, size_read);
-			return 0;
+			return size_read;
 		}
 
 		i += size_read;
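
Returning 0 from the failure branch above made the ioctl report success even
though nothing (or only part of the EEPROM) was read; propagating the negative
size_read lets ethtool see the real error. A minimal userspace sketch of the
pattern, where query_eeprom() is a hypothetical stand-in for mlx5_query_eeprom():

#include <errno.h>
#include <stdio.h>

/* Hypothetical device read: at most 48 bytes per call, fails past offset 256. */
static int query_eeprom(int offset, char *buf, int len)
{
	(void)buf;
	if (offset > 256)
		return -EINVAL;
	return len > 48 ? 48 : len;
}

static int get_module_eeprom(char *buf, int total)
{
	int i = 0;

	while (i < total) {
		int size_read = query_eeprom(i, buf + i, total - i);

		if (size_read < 0)
			return size_read;	/* propagate, don't return 0 */
		i += size_read;
	}
	return 0;
}

int main(void)
{
	char buf[512];

	printf("rc=%d\n", get_module_eeprom(buf, sizeof(buf)));	/* rc=-22 on Linux */
	return 0;
}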


@@ -183,6 +183,13 @@ struct mlx5e_decap_entry {
 	struct rcu_head rcu;
 };
 
+struct mlx5e_mpls_info {
+	u32 label;
+	u8 tc;
+	u8 bos;
+	u8 ttl;
+};
+
 struct mlx5e_encap_entry {
 	/* attached neigh hash entry */
 	struct mlx5e_neigh_hash_entry *nhe;
@@ -196,6 +203,7 @@ struct mlx5e_encap_entry {
 	struct list_head route_list;
 	struct mlx5_pkt_reformat *pkt_reformat;
 	const struct ip_tunnel_info *tun_info;
+	struct mlx5e_mpls_info mpls_info;
 	unsigned char h_dest[ETH_ALEN];	/* destination eth addr */
 
 	struct net_device *out_dev;


@@ -1349,7 +1349,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 	}
 
 	/* True when explicitly set via priv flag, or XDP prog is loaded */
-	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
+	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
+	    get_cqe_tls_offload(cqe))
 		goto csum_unnecessary;
 
 	/* CQE csum doesn't cover padding octets in short ethernet


@@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
 		netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
 		buf[count] = st.st_func(priv);
 		netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
+		count++;
 	}
 
 	mutex_unlock(&priv->state_lock);


@@ -1254,9 +1254,6 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 
-	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
-		return;
-
 	MLX5_SET(ppcnt_reg, in, local_port, 1);
 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
 	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
@@ -1272,6 +1269,9 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
 			 struct ethtool_fec_stats *fec_stats)
 {
+	if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+		return;
+
 	fec_set_corrected_bits_total(priv, fec_stats);
 	fec_set_block_stats(priv, fec_stats);
 }


@@ -3204,6 +3204,18 @@ actions_match_supported(struct mlx5e_priv *priv,
 		return false;
 	}
 
+	if (!(~actions &
+	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+		return false;
+	}
+
+	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+		return false;
+	}
+
 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
 	    !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
 					   actions, ct_flow, ct_clear, extack))
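
The !(~actions & (FWD | DROP)) test above is the compact C idiom for "both
flags set": complementing the mask leaves a 1 bit exactly where a flag is
missing, so the AND is zero only when neither flag is absent. A standalone
illustration; the flag values below are made up for the demo, not the mlx5
definitions:

#include <stdbool.h>
#include <stdio.h>

#define ACT_FWD  (1u << 0)	/* demo values only */
#define ACT_DROP (1u << 1)

static bool both_fwd_and_drop(unsigned int actions)
{
	/* ~actions has a 0 bit exactly where actions has a 1, so the AND
	 * is 0 only if both FWD and DROP are present in actions.
	 */
	return !(~actions & (ACT_FWD | ACT_DROP));
}

int main(void)
{
	printf("%d\n", both_fwd_and_drop(ACT_FWD));		/* 0 */
	printf("%d\n", both_fwd_and_drop(ACT_FWD | ACT_DROP));	/* 1 */
	return 0;
}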


@@ -697,7 +697,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 }
 
 int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-				u32 min_rate, u32 max_rate)
+				u32 max_rate, u32 min_rate)
 {
 	int err;
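
C binds arguments by position and ignores parameter names, so a definition
whose min/max names are interchanged relative to the prototype compiles
without a warning while every caller's rates land in the wrong variables; the
one-line fix simply realigns the definition with its header. A toy
reproduction of the trap (hypothetical function, same shape as the bug):

#include <stdio.h>

/* The prototype documents (max, min)... */
int set_rate(unsigned int max_rate, unsigned int min_rate);

/* ...but the definition interchanged the names, so "min_rate" here actually
 * receives the caller's max. Perfectly legal C, no diagnostics.
 */
int set_rate(unsigned int min_rate, unsigned int max_rate)
{
	printf("min=%u max=%u\n", min_rate, max_rate);
	return 0;
}

int main(void)
{
	set_rate(1000, 100);	/* caller means max=1000, min=100; prints min=1000 max=100 */
	return 0;
}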


@@ -2838,10 +2838,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
 	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
 		return false;
 
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
-	    mlx5_ecpf_vport_exists(esw->dev))
-		return false;
-
 	return true;
 }


@@ -2074,6 +2074,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
 		fte->node.del_hw_func = NULL;
 		up_write_ref_node(&fte->node, false);
 		tree_put_node(&fte->node, false);
+	} else {
+		up_write_ref_node(&fte->node, false);
 	}
 	kfree(handle);
 }
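
The added else branch guarantees up_write_ref_node() also runs on the path
where the FTE is not being torn down; previously that path returned with the
node's write lock still held, so the next writer blocked forever. The classic
shape of the bug, sketched with POSIX rwlocks (illustrative names, not the
mlx5 locking code):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t node_lock = PTHREAD_RWLOCK_INITIALIZER;

static void del_rule(int last_ref)
{
	pthread_rwlock_wrlock(&node_lock);
	if (last_ref) {
		/* tear down state, then release */
		pthread_rwlock_unlock(&node_lock);
	} else {
		/* the buggy version returned here without unlocking,
		 * leaving node_lock held forever
		 */
		pthread_rwlock_unlock(&node_lock);
	}
}

int main(void)
{
	del_rule(0);
	del_rule(1);	/* would deadlock here without the else-unlock */
	puts("no deadlock");
	return 0;
}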


@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
 {
+	if (!mlx5_chains_prios_supported(chains))
+		return 1;
+
 	if (mlx5_chains_ignore_flow_level_supported(chains))
 		return UINT_MAX;


@@ -526,7 +526,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
 	/* Check log_max_qp from HCA caps to set in current profile */
 	if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
-		prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+		prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
 	} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
 		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
 			       prof->log_max_qp,
@@ -1840,10 +1840,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
 	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
 	{ PCI_VDEVICE(MELLANOX, 0x1021) },			/* ConnectX-7 */
+	{ PCI_VDEVICE(MELLANOX, 0x1023) },			/* ConnectX-8 */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
 	{ PCI_VDEVICE(MELLANOX, 0xa2dc) },			/* BlueField-3 integrated ConnectX-7 network controller */
+	{ PCI_VDEVICE(MELLANOX, 0xa2df) },			/* BlueField-4 integrated ConnectX-8 network controller */
 	{ 0, }
 };
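
Clamping log_max_qp to 17 bounds the default profile to 2^17 = 131072 QPs even
when firmware advertises a much larger exponent, keeping resource-table memory
in check. A userspace sketch of the clamp, with a stand-in for the kernel's
min_t() (whose only extra trick is casting both operands to the given type):

#include <stdio.h>

/* userspace stand-in for the kernel's min_t(type, a, b) */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned char hca_log_max_qp = 24;	/* hypothetical firmware cap: 16M QPs */
	unsigned char log_max_qp = min_t(unsigned char, 17, hca_log_max_qp);

	printf("log_max_qp=%u -> %lu QPs\n", log_max_qp, 1UL << log_max_qp);
	return 0;
}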


@@ -4,7 +4,6 @@
 #include "dr_types.h"
 
 #define DR_ICM_MODIFY_HDR_ALIGN_BASE	64
-#define DR_ICM_SYNC_THRESHOLD_POOL	(64 * 1024 * 1024)
 
 struct mlx5dr_icm_pool {
 	enum mlx5dr_icm_type icm_type;
@@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
 	kvfree(icm_mr);
 }
 
-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
+static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
 {
-	chunk->ste_arr = kvzalloc(chunk->num_of_entries *
-				  sizeof(chunk->ste_arr[0]), GFP_KERNEL);
-	if (!chunk->ste_arr)
-		return -ENOMEM;
+	/* We support only one type of STE size, both for ConnectX-5 and later
+	 * devices. Once the support for match STE which has a larger tag is
+	 * added (32B instead of 16B), the STE size for devices later than
	 * ConnectX-5 needs to account for that.
+	 */
+	return DR_STE_SIZE_REDUCED;
+}
 
-	chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
-				     DR_STE_SIZE_REDUCED, GFP_KERNEL);
-	if (!chunk->hw_ste_arr)
-		goto out_free_ste_arr;
+static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
+{
+	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+	int index = offset / DR_STE_SIZE;
 
-	chunk->miss_list = kvmalloc(chunk->num_of_entries *
-				    sizeof(chunk->miss_list[0]), GFP_KERNEL);
-	if (!chunk->miss_list)
-		goto out_free_hw_ste_arr;
-
-	return 0;
-
-out_free_hw_ste_arr:
-	kvfree(chunk->hw_ste_arr);
-out_free_ste_arr:
-	kvfree(chunk->ste_arr);
-	return -ENOMEM;
+	chunk->ste_arr = &buddy->ste_arr[index];
+	chunk->miss_list = &buddy->miss_list[index];
+	chunk->hw_ste_arr = buddy->hw_ste_arr +
+			    index * dr_icm_buddy_get_ste_size(buddy);
 }
 
 static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
 {
-	kvfree(chunk->miss_list);
-	kvfree(chunk->hw_ste_arr);
-	kvfree(chunk->ste_arr);
+	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+
+	memset(chunk->hw_ste_arr, 0,
+	       chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+	memset(chunk->ste_arr, 0,
+	       chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
 }
 
 static enum mlx5dr_icm_type
@@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
 	kvfree(chunk);
 }
 
+static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+	int num_of_entries =
+		mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
+
+	buddy->ste_arr = kvcalloc(num_of_entries,
+				  sizeof(struct mlx5dr_ste), GFP_KERNEL);
+	if (!buddy->ste_arr)
+		return -ENOMEM;
+
+	/* Preallocate full STE size on non-ConnectX-5 devices since
+	 * we need to support both full and reduced with the same cache.
+	 */
+	buddy->hw_ste_arr = kvcalloc(num_of_entries,
+				     dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
+	if (!buddy->hw_ste_arr)
+		goto free_ste_arr;
+
+	buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
+	if (!buddy->miss_list)
+		goto free_hw_ste_arr;
+
+	return 0;
+
+free_hw_ste_arr:
+	kvfree(buddy->hw_ste_arr);
+free_ste_arr:
+	kvfree(buddy->ste_arr);
+	return -ENOMEM;
+}
+
+static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+	kvfree(buddy->ste_arr);
+	kvfree(buddy->hw_ste_arr);
+	kvfree(buddy->miss_list);
+}
+
 static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
 {
 	struct mlx5dr_icm_buddy_mem *buddy;
@@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
 	buddy->icm_mr = icm_mr;
 	buddy->pool = pool;
 
+	if (pool->icm_type == DR_ICM_TYPE_STE) {
+		/* Reduce allocations by preallocating and reusing the STE structures */
+		if (dr_icm_buddy_init_ste_cache(buddy))
+			goto err_cleanup_buddy;
+	}
+
 	/* add it to the -start- of the list in order to search in it first */
 	list_add(&buddy->list_node, &pool->buddy_mem_list);
 
 	return 0;
 
+err_cleanup_buddy:
+	mlx5dr_buddy_cleanup(buddy);
 err_free_buddy:
 	kvfree(buddy);
 free_mr:
@@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
 	mlx5dr_buddy_cleanup(buddy);
 
+	if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
+		dr_icm_buddy_cleanup_ste_cache(buddy);
+
 	kvfree(buddy);
 }
 
@@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
 	chunk->byte_size =
 		mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
 	chunk->seg = seg;
+	chunk->buddy_mem = buddy_mem_pool;
 
-	if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
-		mlx5dr_err(pool->dmn,
-			   "Failed to init ste arrays (order: %d)\n",
-			   chunk_size);
-		goto out_free_chunk;
-	}
+	if (pool->icm_type == DR_ICM_TYPE_STE)
+		dr_icm_chunk_ste_init(chunk, offset);
 
 	buddy_mem_pool->used_memory += chunk->byte_size;
-	chunk->buddy_mem = buddy_mem_pool;
 	INIT_LIST_HEAD(&chunk->chunk_list);
 
 	/* chunk now is part of the used_list */
 	list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
 
 	return chunk;
-
-out_free_chunk:
-	kvfree(chunk);
-	return NULL;
 }
 
 static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
 {
-	if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
-		return true;
+	int allow_hot_size;
 
-	return false;
+	/* sync when hot memory reaches half of the pool size */
+	allow_hot_size =
+		mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+						   pool->icm_type) / 2;
+
+	return pool->hot_memory_size > allow_hot_size;
 }
 
 static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
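
The threshold fix replaces a fixed 64 MB trigger, which a pool smaller than
64 MB could never reach, with half of the pool's own size. A back-of-the-envelope
sketch of the two predicates (pool sizes are illustrative, not the ICM
chunk-size math):

#include <stdbool.h>
#include <stdio.h>

static bool sync_required_old(long hot, long pool_size)
{
	(void)pool_size;
	return hot > 64L * 1024 * 1024;	/* fixed 64 MB: unreachable for small pools */
}

static bool sync_required_new(long hot, long pool_size)
{
	return hot > pool_size / 2;	/* sync once hot memory is half the pool */
}

int main(void)
{
	long pool_size = 16L * 1024 * 1024;	/* hypothetical 16 MB STE pool */
	long hot = 12L * 1024 * 1024;		/* 12 MB freed but not yet synced */

	printf("old=%d new=%d\n", sync_required_old(hot, pool_size),
	       sync_required_new(hot, pool_size));	/* old=0 new=1 */
	return 0;
}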


@@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
 	return (spec->dmac_47_16 || spec->dmac_15_0);
 }
 
-static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
-{
-	return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
-		spec->src_ip_63_32 || spec->src_ip_31_0);
-}
-
-static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
-{
-	return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
-		spec->dst_ip_63_32 || spec->dst_ip_31_0);
-}
-
 static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
 {
 	return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
@@ -503,11 +491,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 							    &mask, inner, rx);
 
 		if (outer_ipv == DR_RULE_IPV6) {
-			if (dr_mask_is_dst_addr_set(&mask.outer))
+			if (DR_MASK_IS_DST_IP_SET(&mask.outer))
 				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
 								 &mask, inner, rx);
 
-			if (dr_mask_is_src_addr_set(&mask.outer))
+			if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
 				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
 								 &mask, inner, rx);
@@ -610,11 +598,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 							    &mask, inner, rx);
 
 		if (inner_ipv == DR_RULE_IPV6) {
-			if (dr_mask_is_dst_addr_set(&mask.inner))
+			if (DR_MASK_IS_DST_IP_SET(&mask.inner))
 				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
 								 &mask, inner, rx);
 
-			if (dr_mask_is_src_addr_set(&mask.inner))
+			if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
 				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
 								 &mask, inner, rx);


@@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
 					used_hw_action_num);
 }
 
+static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
+				       struct mlx5dr_match_spec *spec)
+{
+	if (spec->ip_version) {
+		if (spec->ip_version != 0xf) {
+			mlx5dr_err(dmn,
+				   "Partial ip_version mask with src/dst IP is not supported\n");
+			return -EINVAL;
+		}
+	} else if (spec->ethertype != 0xffff &&
+		   (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
+		mlx5dr_err(dmn,
+			   "Partial/no ethertype mask with src/dst IP is not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
 			       u8 match_criteria,
 			       struct mlx5dr_match_param *mask,
 			       struct mlx5dr_match_param *value)
 {
-	if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
+	if (value)
+		return 0;
+
+	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
 		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
 			mlx5dr_err(dmn,
 				   "Partial mask source_port is not supported\n");
@@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
 		}
 	}
 
+	if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
+	    dr_ste_build_pre_check_spec(dmn, &mask->outer))
+		return -EINVAL;
+
+	if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
+	    dr_ste_build_pre_check_spec(dmn, &mask->inner))
+		return -EINVAL;
+
 	return 0;
 }


@@ -798,6 +798,16 @@ struct mlx5dr_match_param {
 	(_misc3)->icmpv4_code || \
 	(_misc3)->icmpv4_header_data)
 
+#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
+				      (_spec)->src_ip_95_64 || \
+				      (_spec)->src_ip_63_32 || \
+				      (_spec)->src_ip_31_0)
+
+#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
+				      (_spec)->dst_ip_95_64 || \
+				      (_spec)->dst_ip_63_32 || \
+				      (_spec)->dst_ip_31_0)
+
 struct mlx5dr_esw_caps {
 	u64 drop_icm_address_rx;
 	u64 drop_icm_address_tx;


@@ -233,7 +233,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
 		dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
 }
 
-#define MLX5_FLOW_CONTEXT_ACTION_MAX  32
+/* We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX  34
 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 				  struct mlx5_flow_table *ft,
 				  struct mlx5_flow_group *group,
@@ -403,9 +407,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 			enum mlx5_flow_destination_type type = dst->dest_attr.type;
 			u32 id;
 
-			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
-			    num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
-				err = -ENOSPC;
+			if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+			    num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+				err = -EOPNOTSUPP;
 				goto free_actions;
 			}
@@ -478,8 +482,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
 				continue;
 
-			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
-				err = -ENOSPC;
+			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+			    fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+				err = -EOPNOTSUPP;
 				goto free_actions;
 			}
@@ -499,14 +504,28 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 	params.match_sz = match_sz;
 	params.match_buf = (u64 *)fte->val;
 	if (num_term_actions == 1) {
-		if (term_actions->reformat)
+		if (term_actions->reformat) {
+			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+				err = -EOPNOTSUPP;
+				goto free_actions;
+			}
 			actions[num_actions++] = term_actions->reformat;
+		}
 
+		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+			err = -EOPNOTSUPP;
+			goto free_actions;
+		}
 		actions[num_actions++] = term_actions->dest;
 	} else if (num_term_actions > 1) {
 		bool ignore_flow_level =
 			!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
 
+		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+		    fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+			err = -EOPNOTSUPP;
+			goto free_actions;
+		}
 		tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
 								term_actions,
 								num_term_actions,
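
The new bound follows the comment's arithmetic: 32 destinations plus typically
one counter plus one action for a multi-destination flow table gives 34, and
the added checks refuse the rule before actions[] can overflow, which is what
the slab-out-of-bounds report was about. A tiny bounds-check sketch in the
same spirit (array size and names are illustrative):

#include <errno.h>
#include <stdio.h>

#define ACTION_MAX 34	/* 32 destinations + 1 counter + 1 multi-dest table */

static int push_action(const void *actions[], int *num_actions, const void *act)
{
	if (*num_actions == ACTION_MAX)
		return -EOPNOTSUPP;	/* refuse instead of writing past the array */
	actions[(*num_actions)++] = act;
	return 0;
}

int main(void)
{
	const void *actions[ACTION_MAX];
	int n = 0, rc = 0, i;

	for (i = 0; i < 40 && !rc; i++)	/* the 35th push must be refused */
		rc = push_action(actions, &n, &i);
	printf("stored=%d rc=%d\n", n, rc);	/* stored=34 rc=-95 on Linux */
	return 0;
}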


@@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem {
 	 * sync_ste command sets them free.
 	 */
 	struct list_head hot_list;
+
+	/* Memory optimisation */
+	struct mlx5dr_ste *ste_arr;
+	struct list_head *miss_list;
+	u8 *hw_ste_arr;
 };
 
 int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,