mlx5-updates-2021-05-26

Misc update for mlx5 driver,
1) Clean up patches for lag and SF
2) Reserve bit 31 in steering register C1 for IPSec offload usage
3) Move steering tables pool logic into the steering core and increase
   the maximum table size to 2G entries when software steering is enabled.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmCv6vAACgkQSD+KveBX
+j6qnAgAz0eKWKCsFCqlXGIgF1cg3FrGR5W2Zi5euriHhHwNqnZof3AIMkzcXjLL
wBlPjWk3YLfBaBNPTziz6EJuGl1vZZxuSdc7bqsNnl0srujRtQFu3JyerdgXEXNL
W2NxjSTiVwu8lq2qlYauQvcE0v+JrB/LMe9tvq1UQ2v9FtBMMhs9hGUSCro2huwj
XYF0m0ve89+mYlm6/m0SIUpPVdMiIhm4+coO1wibk7+8jn6+ZT6EJbbZvjc9eQg7
ZKr8f/TpfmvHToG8LPOc6HqHzRiHlp3Yzsft+xm54r082n4F/noGhL+Hqvvj1aTj
C6Ip5N7VkzT+erMLMrjIbrmEP94cyQ==
=torZ
-----END PGP SIGNATURE-----

Merge tag 'mlx5-updates-2021-05-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-05-26

Misc update for mlx5 driver,
1) Clean up patches for lag and SF
2) Reserve bit 31 in steering register C1 for IPSec offload usage
3) Move steering tables pool logic into the steering core and increase
   the maximum table size to 2G entries when software steering is enabled.

* tag 'mlx5-updates-2021-05-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Fix lag port remapping logic
  net/mlx5: Use boolean arithmetic to evaluate roce_lag
  net/mlx5: Remove unnecessary spin lock protection
  net/mlx5: Cap the maximum flow group size to 16M entries
  net/mlx5: DR, Set max table size to 2G entries
  net/mlx5: Move chains ft pool to be used by all firmware steering
  net/mlx5: Move table size calculation to steering cmd layer
  net/mlx5: Add case for FS_FT_NIC_TX FT in MLX5_CAP_FLOWTABLE_TYPE
  net/mlx5: DR, Remove unused field of send_ring struct
  net/mlx5e: RX, Remove unnecessary check in RX CQE compression handling
  net/mlx5e: IPsec/rep_tc: Fix rep_tc_update_skb drops IPsec packet
  net/mlx5e: TC: Reserved bit 31 of REG_C1 for IPsec offload
  net/mlx5e: TC: Use bit counts for register mapping
  net/mlx5: CT: Avoid reusing modify header context for natted entries
  net/mlx5e: CT, Remove newline from ct_dbg call
====================

Link: https://lore.kernel.org/r/20210527185624.694304-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit af9207adb6
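Two of the series above change every TC register mapping from byte offsets/lengths to bit offsets/lengths. As a quick orientation aid (not part of the series), here is a minimal user-space sketch of the length-encoding rule the register-mapping hunks below rely on: the firmware set_action length field is 5 bits wide and the value 0 means "32 bits". encode_set_action_length() is a hypothetical stand-in for the value the driver passes to MLX5_SET(set_action_in, modact, length, ...), not a driver API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the rule used in the diff: offsets and
 * lengths are now expressed in bits, and a length of 32 bits is encoded
 * as 0 because the firmware length field is only 5 bits wide. */
static uint8_t encode_set_action_length(unsigned int mlen_bits)
{
	assert(mlen_bits >= 1 && mlen_bits <= 32);
	return mlen_bits == 32 ? 0 : (uint8_t)mlen_bits; /* fits the 5-bit field */
}

int main(void)
{
	/* A few of the bit widths that appear in the new register mappings. */
	unsigned int widths[] = { 8, 11 + 12, 16, 32 };

	for (unsigned int i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("mlen=%2u bits -> length field 0x%02x\n",
		       widths[i], encode_set_action_length(widths[i]));
	return 0;
}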
@@ -14,7 +14,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
- fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
+ fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
fw_reset.o qos.o
@@ -617,7 +617,7 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
struct mlx5e_tc_update_priv *tc_priv)
{
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- u32 tunnel_id = reg_c1 >> ESW_TUN_OFFSET;
+ u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;

if (chain) {
struct mlx5_rep_uplink_priv *uplink_priv;
@@ -23,7 +23,7 @@
#include "en_tc.h"
#include "en_rep.h"

- #define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen * 8)
+ #define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
@@ -32,11 +32,11 @@
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)

- #define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
+ #define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX

- #define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen * 8)
+ #define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)

#define ct_dbg(fmt, args...)\
@@ -150,6 +150,11 @@ struct mlx5_ct_entry {
unsigned long flags;
};

+ static void
+ mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_mod_hdr_handle *mh);
+
static const struct rhashtable_params cts_ht_params = {
.head_offset = offsetof(struct mlx5_ct_entry, node),
.key_offset = offsetof(struct mlx5_ct_entry, cookie),
@@ -458,8 +463,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);

mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
- mlx5e_mod_hdr_detach(ct_priv->dev,
- ct_priv->mod_hdr_tbl, zone_rule->mh);
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
kfree(attr);
}
@@ -686,15 +690,27 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_mapping;

- *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
- ct_priv->mod_hdr_tbl,
- ct_priv->ns_type,
- &mod_acts);
- if (IS_ERR(*mh)) {
- err = PTR_ERR(*mh);
- goto err_mapping;
+ if (nat) {
+ attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
+ mod_acts.num_actions,
+ mod_acts.actions);
+ if (IS_ERR(attr->modify_hdr)) {
+ err = PTR_ERR(attr->modify_hdr);
+ goto err_mapping;
+ }
+
+ *mh = NULL;
+ } else {
+ *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl,
+ ct_priv->ns_type,
+ &mod_acts);
+ if (IS_ERR(*mh)) {
+ err = PTR_ERR(*mh);
+ goto err_mapping;
+ }
+ attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
}
- attr->modify_hdr = mlx5e_mod_hdr_get(*mh);

dealloc_mod_hdr_actions(&mod_acts);
return 0;
@@ -705,6 +721,17 @@ err_mapping:
return err;
}

+ static void
+ mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_mod_hdr_handle *mh)
+ {
+ if (mh)
+ mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl, mh);
+ else
+ mlx5_modify_header_dealloc(ct_priv->dev, attr->modify_hdr);
+ }
+
static int
mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
struct flow_rule *flow_rule,
@@ -767,8 +794,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return 0;

err_rule:
- mlx5e_mod_hdr_detach(ct_priv->dev,
- ct_priv->mod_hdr_tbl, zone_rule->mh);
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
kfree(attr);
@@ -918,7 +944,7 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
}

if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
- ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
+ ct_dbg("Using shared counter entry=0x%p rev=0x%p", entry, rev_entry);
shared_counter = rev_entry->counter;
spin_unlock_bh(&ct_priv->ht_lock);

@@ -33,15 +33,15 @@ struct mlx5_ct_attr {
#define zone_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
.moffset = 0,\
- .mlen = 2,\
+ .mlen = 16,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
- misc_parameters_2.metadata_reg_c_2) + 2,\
+ misc_parameters_2.metadata_reg_c_2),\
}

#define ctstate_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
- .moffset = 2,\
- .mlen = 2,\
+ .moffset = 16,\
+ .mlen = 16,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_2),\
}
@@ -49,7 +49,7 @@ struct mlx5_ct_attr {
#define mark_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_3,\
.moffset = 0,\
- .mlen = 4,\
+ .mlen = 32,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_3),\
}
@@ -57,7 +57,7 @@ struct mlx5_ct_attr {
#define labels_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_4,\
.moffset = 0,\
- .mlen = 4,\
+ .mlen = 32,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_4),\
}
@@ -65,7 +65,7 @@ struct mlx5_ct_attr {
#define fteid_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
.moffset = 0,\
- .mlen = 4,\
+ .mlen = 32,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_5),\
}
@@ -73,20 +73,19 @@ struct mlx5_ct_attr {
#define zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
.moffset = 0,\
- .mlen = (ESW_ZONE_ID_BITS / 8),\
+ .mlen = ESW_ZONE_ID_BITS,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
- misc_parameters_2.metadata_reg_c_1) + 3,\
+ misc_parameters_2.metadata_reg_c_1),\
}

#define nic_zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,\
- .moffset = 2,\
- .mlen = (ESW_ZONE_ID_BITS / 8),\
+ .moffset = 16,\
+ .mlen = ESW_ZONE_ID_BITS,\
}

#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
- #define REG_MAPPING_SHIFT(reg) (REG_MAPPING_MOFFSET(reg) * 8)

#if IS_ENABLED(CONFIG_MLX5_TC_CT)
@@ -1310,7 +1310,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);

- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
dev_kfree_skb_any(skb);
goto free_wqe;
}
@@ -1367,7 +1368,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64

mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
}
@@ -1558,7 +1560,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)

if (rq->cqd.left) {
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
- if (rq->cqd.left || work_done >= budget)
+ if (work_done >= budget)
goto out;
}

@@ -83,17 +83,17 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
.moffset = 0,
- .mlen = 2,
+ .mlen = 16,
},
[VPORT_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
- .moffset = 2,
- .mlen = 2,
+ .moffset = 16,
+ .mlen = 16,
},
[TUNNEL_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
- .moffset = 1,
- .mlen = ((ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS) / 8),
+ .moffset = 8,
+ .mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
.soffset = MLX5_BYTE_OFF(fte_match_param,
misc_parameters_2.metadata_reg_c_1),
},
@@ -110,7 +110,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[NIC_CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
.moffset = 0,
- .mlen = 2,
+ .mlen = 16,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
@@ -128,23 +128,46 @@ static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
- u32 data,
+ u32 val,
u32 mask)
{
- void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ void *headers_c = spec->match_criteria;
+ void *headers_v = spec->match_value;
+ void *fmask, *fval;
+ u32 max_mask = GENMASK(match_len - 1, 0);
+ __be32 curr_mask_be, curr_val_be;
+ u32 curr_mask, curr_val;

fmask = headers_c + soffset;
fval = headers_v + soffset;

- mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
- data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));
+ memcpy(&curr_mask_be, fmask, 4);
+ memcpy(&curr_val_be, fval, 4);

- memcpy(fmask, &mask, match_len);
- memcpy(fval, &data, match_len);
+ curr_mask = be32_to_cpu(curr_mask_be);
+ curr_val = be32_to_cpu(curr_val_be);
+
+ //move to correct offset
+ WARN_ON(mask > max_mask);
+ mask <<= moffset;
+ val <<= moffset;
+ max_mask <<= moffset;
+
+ //zero val and mask
+ curr_mask &= ~max_mask;
+ curr_val &= ~max_mask;
+
+ //add current to mask
+ curr_mask |= mask;
+ curr_val |= val;
+
+ //back to be32 and write
+ curr_mask_be = cpu_to_be32(curr_mask);
+ curr_val_be = cpu_to_be32(curr_val);
+
+ memcpy(fmask, &curr_mask_be, 4);
+ memcpy(fval, &curr_val_be, 4);

spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
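The hunk above replaces the old byte-granular memcpy of match values with a bit-granular read-modify-write of the whole 32-bit metadata register. A minimal user-space sketch of that idea follows (an illustration only: pack_field() and GENMASK32 are stand-ins, not driver APIs, and the big-endian load/store done in the kernel is omitted here).

#include <stdint.h>
#include <stdio.h>

/* GENMASK32 mimics the kernel's GENMASK for 32-bit values. */
#define GENMASK32(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/* Clear the field's bit range in reg, then OR in the shifted value. */
static uint32_t pack_field(uint32_t reg, uint32_t val, int moffset, int mlen)
{
	uint32_t max_mask = GENMASK32(mlen - 1, 0) << moffset;

	reg &= ~max_mask;                    /* zero the field        */
	reg |= (val << moffset) & max_mask;  /* add the new value     */
	return reg;
}

int main(void)
{
	/* Example: an 8-bit zone id at bit offset 0 and a 23-bit tunnel
	 * value at bit offset 8, packed into the same 32-bit register. */
	uint32_t reg = 0;

	reg = pack_field(reg, 0x5a, 0, 8);
	reg = pack_field(reg, 0x1234, 8, 23);
	printf("reg = 0x%08x\n", reg); /* prints 0x0012345a */
	return 0;
}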
@@ -152,23 +175,28 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
- u32 *data,
+ u32 *val,
u32 *mask)
{
- void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ void *headers_c = spec->match_criteria;
+ void *headers_v = spec->match_value;
+ void *fmask, *fval;
+ u32 max_mask = GENMASK(match_len - 1, 0);
+ __be32 curr_mask_be, curr_val_be;
+ u32 curr_mask, curr_val;

fmask = headers_c + soffset;
fval = headers_v + soffset;

- memcpy(mask, fmask, match_len);
- memcpy(data, fval, match_len);
+ memcpy(&curr_mask_be, fmask, 4);
+ memcpy(&curr_val_be, fval, 4);

- *mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
- *data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
+ curr_mask = be32_to_cpu(curr_mask_be);
+ curr_val = be32_to_cpu(curr_val_be);
+
+ *mask = (curr_mask >> moffset) & max_mask;
+ *val = (curr_val >> moffset) & max_mask;
}

int
@@ -192,13 +220,13 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
(mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

/* Firmware has 5bit length field and 0 means 32bits */
- if (mlen == 4)
+ if (mlen == 32)
mlen = 0;

MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, mfield);
- MLX5_SET(set_action_in, modact, offset, moffset * 8);
- MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, offset, moffset);
+ MLX5_SET(set_action_in, modact, length, mlen);
MLX5_SET(set_action_in, modact, data, data);
err = mod_hdr_acts->num_actions;
mod_hdr_acts->num_actions++;
@@ -296,13 +324,13 @@ void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);

/* Firmware has 5bit length field and 0 means 32bits */
- if (mlen == 4)
+ if (mlen == 32)
mlen = 0;

MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, mfield);
- MLX5_SET(set_action_in, modact, offset, moffset * 8);
- MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, offset, moffset);
+ MLX5_SET(set_action_in, modact, length, mlen);
MLX5_SET(set_action_in, modact, data, data);
}
@@ -5096,7 +5124,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,

tc_skb_ext->chain = chain;

- zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
+ zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
ESW_ZONE_ID_MASK;

if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
@@ -129,7 +129,7 @@ struct tunnel_match_enc_opts {
*/
#define TUNNEL_INFO_BITS 12
#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
- #define ENC_OPTS_BITS 12
+ #define ENC_OPTS_BITS 11
#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
@@ -198,10 +198,10 @@ enum mlx5e_tc_attr_to_reg {

struct mlx5e_tc_attr_to_reg_mapping {
int mfield; /* rewrite field */
- int moffset; /* offset of mfield */
- int mlen; /* bytes to rewrite/match */
+ int moffset; /* bit offset of mfield */
+ int mlen; /* bits to rewrite/match */

- int soffset; /* offset of spec for match */
+ int soffset; /* byte offset of spec for match */
};

extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
@@ -36,6 +36,7 @@

#include "fs_core.h"
#include "fs_cmd.h"
+ #include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"

@@ -49,9 +50,11 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,

static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft)
{
+ ft->max_fte = size ? roundup_pow_of_two(size) : 1;
+
return 0;
}
@@ -181,7 +184,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,

static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft)
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
@@ -192,12 +195,18 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_core_dev *dev = ns->dev;
int err;

+ if (size != POOL_NEXT_SIZE)
+ size = roundup_pow_of_two(size);
+ size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
+ if (!size)
+ return -ENOSPC;
+
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);

MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
- MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
@@ -234,9 +243,14 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
}

err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
- if (!err)
+ if (!err) {
ft->id = MLX5_GET(create_flow_table_out, out,
table_id);
+ ft->max_fte = size;
+ } else {
+ mlx5_ft_pool_put_sz(ns->dev, size);
+ }

return err;
}
@@ -245,6 +259,7 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
+ int err;

MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@@ -254,7 +269,11 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

- return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
+ err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
+ if (!err)
+ mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);
+
+ return err;
}

static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
|
||||
struct mlx5_flow_cmds {
|
||||
int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned int log_size,
|
||||
unsigned int size,
|
||||
struct mlx5_flow_table *next_ft);
|
||||
int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
|
||||
struct mlx5_flow_table *ft);
|
||||
|
@@ -38,6 +38,7 @@
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
+ #include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
@@ -752,7 +753,7 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
return fg;
}

- static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
+ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
enum fs_flow_table_type table_type,
enum fs_flow_table_op_mod op_mod,
u32 flags)
@@ -775,7 +776,6 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->op_mod = op_mod;
ft->type = table_type;
ft->vport = vport;
- ft->max_fte = max_fte;
ft->flags = flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -1070,7 +1070,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
struct mlx5_flow_table *next_ft;
struct fs_prio *fs_prio = NULL;
struct mlx5_flow_table *ft;
- int log_table_sz;
int err;

if (!root) {
@@ -1101,7 +1100,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
*/
ft = alloc_flow_table(ft_attr->level,
vport,
- ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
root->table_type,
op_mod, ft_attr->flags);
if (IS_ERR(ft)) {
@@ -1110,12 +1108,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
}

tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
- log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = unmanaged ? ft_attr->next_ft :
find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
ft->ns = ns;
- err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
+ err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
if (err)
goto free_ft;

@@ -1170,28 +1167,36 @@ mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,

ft_attr.level = level;
ft_attr.prio = prio;
+ ft_attr.max_fte = 1;

return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

+ #define MAX_FLOW_GROUP_SIZE BIT(24)
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr)
{
int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
- int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
int max_num_groups = ft_attr->autogroup.max_num_groups;
struct mlx5_flow_table *ft;

- if (max_num_groups > autogroups_max_fte)
- return ERR_PTR(-EINVAL);
- if (num_reserved_entries > ft_attr->max_fte)
- return ERR_PTR(-EINVAL);
+ int autogroups_max_fte;

ft = mlx5_create_flow_table(ns, ft_attr);
if (IS_ERR(ft))
return ft;

+ autogroups_max_fte = ft->max_fte - num_reserved_entries;
+ if (max_num_groups > autogroups_max_fte)
+ goto err_validate;
+ if (num_reserved_entries > ft->max_fte)
+ goto err_validate;
+
+ /* Align the number of groups according to the largest group size */
+ if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
+ max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
+
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
ft->autogroup.max_fte = autogroups_max_fte;
@@ -1199,6 +1204,10 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);

return ft;
+
+ err_validate:
+ mlx5_destroy_flow_table(ft);
+ return ERR_PTR(-ENOSPC);
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
@@ -2592,6 +2601,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
kmem_cache_destroy(steering->fgs_cache);
+ mlx5_ft_pool_destroy(dev);
kfree(steering);
}

@@ -2942,9 +2952,13 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
if (err)
return err;

+ err = mlx5_ft_pool_init(dev);
+ if (err)
+ return err;
+
steering = kzalloc(sizeof(*steering), GFP_KERNEL);
if (!steering)
- return -ENOMEM;
+ goto err;
steering->dev = dev;
dev->priv.steering = steering;

@@ -331,6 +331,7 @@ void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);

#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
(type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
+ (type == FS_FT_NIC_TX) ? MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) : \
(type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
(type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c (new file, 83 lines)
@@ -0,0 +1,83 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include "fs_ft_pool.h"

/* Firmware currently has 4 pool of 4 sizes that it supports (FT_POOLS),
 * and a virtual memory region of 16M (MLX5_FT_SIZE), this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via mlx5_ft_pool_get_avail_sz.
 * Firmware doesn't report any of this for now.
 * ESW_POOL is expected to be sorted from large to small and match firmware
 * pools.
 */
#define FT_SIZE (16 * 1024 * 1024)
static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024,
64 * 1024,
128,
1 /* size for termination tables */ };
struct mlx5_ft_pool {
int ft_left[ARRAY_SIZE(FT_POOLS)];
};

int mlx5_ft_pool_init(struct mlx5_core_dev *dev)
{
struct mlx5_ft_pool *ft_pool;
int i;

ft_pool = kzalloc(sizeof(*ft_pool), GFP_KERNEL);

for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
ft_pool->ft_left[i] = FT_SIZE / FT_POOLS[i];

dev->priv.ft_pool = ft_pool;
return 0;
}

void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev)
{
kfree(dev->priv.ft_pool);
}

int
mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
int desired_size)
{
u32 max_ft_size = 1 << MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_ft_size, table_type);
int i, found_i = -1;

for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
if (desired_size != POOL_NEXT_SIZE)
break;
}
}

if (found_i != -1) {
--dev->priv.ft_pool->ft_left[found_i];
return FT_POOLS[found_i];
}

return 0;
}

void
mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz)
{
int i;

if (!sz)
return;

for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
if (sz == FT_POOLS[i]) {
++dev->priv.ft_pool->ft_left[i];
return;
}
}

WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
}
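For orientation, a stand-alone user-space model of the bucketing in the new file above (this is an illustrative sketch, not the kernel code: there is no mlx5_core_dev, the per-type log_max_ft_size cap is omitted, and pool_get()/pool_init() are hypothetical names). It only demonstrates how a requested size is rounded up to the smallest pool size that fits, and how a request of POOL_NEXT_SIZE picks the largest bucket that still has budget.

#include <stdio.h>

#define FT_SIZE (16 * 1024 * 1024)
#define POOL_NEXT_SIZE 0
static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
					 64 * 1024, 128, 1 };
#define NPOOLS (sizeof(FT_POOLS) / sizeof(FT_POOLS[0]))
static int ft_left[NPOOLS];

static void pool_init(void)
{
	/* Each size gets a budget of FT_SIZE / size tables. */
	for (unsigned int i = 0; i < NPOOLS; i++)
		ft_left[i] = FT_SIZE / FT_POOLS[i];
}

static unsigned int pool_get(unsigned int desired)
{
	int found = -1;

	/* Walk from the smallest size to the largest, like the kernel loop. */
	for (int i = NPOOLS - 1; i >= 0; i--) {
		if (ft_left[i] && FT_POOLS[i] >= desired) {
			found = i;
			if (desired != POOL_NEXT_SIZE)
				break; /* smallest pool size that fits */
		}
	}
	if (found < 0)
		return 0;
	ft_left[found]--;
	return FT_POOLS[found];
}

int main(void)
{
	pool_init();
	printf("ask 100   -> get %u\n", pool_get(100));            /* 128 */
	printf("ask 70000 -> get %u\n", pool_get(70000));          /* 1M  */
	printf("ask NEXT  -> get %u\n", pool_get(POOL_NEXT_SIZE)); /* 4M  */
	return 0;
}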
drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h (new file, 21 lines)
@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */

#ifndef __MLX5_FS_FT_POOL_H__
#define __MLX5_FS_FT_POOL_H__

#include <linux/mlx5/driver.h>
#include "fs_core.h"

#define POOL_NEXT_SIZE 0

int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);

int
mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
int desired_size);
void
mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz);

#endif /* __MLX5_FS_FT_POOL_H__ */
@@ -118,17 +118,24 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
u8 *port1, u8 *port2)
{
+ bool p1en;
+ bool p2en;
+
+ p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
+ tracker->netdev_state[MLX5_LAG_P1].link_up;
+
+ p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
+ tracker->netdev_state[MLX5_LAG_P2].link_up;
+
*port1 = 1;
*port2 = 2;
- if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
- !tracker->netdev_state[MLX5_LAG_P1].link_up) {
- *port1 = 2;
+ if ((!p1en && !p2en) || (p1en && p2en))
return;
- }

- if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
- !tracker->netdev_state[MLX5_LAG_P2].link_up)
+ if (p1en)
*port2 = 1;
+ else
+ *port1 = 2;
}

void mlx5_modify_lag(struct mlx5_lag *ldev,
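The remapping rule after the "Fix lag port remapping logic" patch above is small enough to exercise exhaustively. A user-space sketch (an illustration, not the driver function; it reuses only the logic visible in the hunk) that prints the full truth table:

#include <stdbool.h>
#include <stdio.h>

/* Both ports usable or both unusable -> keep the identity mapping 1/2.
 * Only port 1 usable -> steer both to port 1.
 * Only port 2 usable -> steer both to port 2. */
static void infer_tx_affinity(bool p1en, bool p2en,
			      unsigned char *port1, unsigned char *port2)
{
	*port1 = 1;
	*port2 = 2;
	if ((!p1en && !p2en) || (p1en && p2en))
		return;
	if (p1en)
		*port2 = 1;
	else
		*port1 = 2;
}

int main(void)
{
	for (int p1 = 0; p1 <= 1; p1++)
		for (int p2 = 0; p2 <= 1; p2++) {
			unsigned char a, b;

			infer_tx_affinity(p1, p2, &a, &b);
			printf("p1en=%d p2en=%d -> port1=%u port2=%u\n",
			       p1, p2, a, b);
		}
	return 0;
}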
@@ -280,9 +287,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!mlx5_lag_is_ready(ldev))
return;

- spin_lock(&lag_lock);
tracker = ldev->tracker;
- spin_unlock(&lag_lock);

do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);

@@ -291,8 +296,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
!mlx5_sriov_is_enabled(dev1);

#ifdef CONFIG_MLX5_ESWITCH
- roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
- dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
+ roce_lag = roce_lag &&
+ dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
+ dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif

if (roce_lag)
@@ -481,9 +487,7 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}

- spin_lock(&lag_lock);
ldev->tracker = tracker;
- spin_unlock(&lag_lock);

if (changed)
mlx5_queue_bond_work(ldev, 0);
@@ -6,6 +6,7 @@
#include <linux/mlx5/fs.h>

#include "lib/fs_chains.h"
+ #include "fs_ft_pool.h"
#include "en/mapping.h"
#include "fs_core.h"
#include "en_tc.h"
@@ -13,25 +14,10 @@
#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht)
- #define ft_pool_left(chains) ((chains)->ft_left)
#define tc_default_ft(chains) ((chains)->tc_default_ft)
#define tc_end_ft(chains) ((chains)->tc_end_ft)
#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
FDB_TC_OFFLOAD : MLX5E_TC_PRIO)

- /* Firmware currently has 4 pool of 4 sizes that it supports (FT_POOLS),
-  * and a virtual memory region of 16M (MLX5_FT_SIZE), this region is duplicated
-  * for each flow table pool. We can allocate up to 16M of each pool,
-  * and we keep track of how much we used via get_next_avail_sz_from_pool.
-  * Firmware doesn't report any of this for now.
-  * ESW_POOL is expected to be sorted from large to small and match firmware
-  * pools.
-  */
- #define FT_SIZE (16 * 1024 * 1024)
- static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
- 1 * 1024 * 1024,
- 64 * 1024,
- 128 };
#define FT_TBL_SZ (64 * 1024)

struct mlx5_fs_chains {
@@ -49,8 +35,6 @@ struct mlx5_fs_chains {
enum mlx5_flow_namespace_type ns;
u32 group_num;
u32 flags;
-
- int ft_left[ARRAY_SIZE(FT_POOLS)];
};

struct fs_chain {
@@ -160,54 +144,6 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
tc_end_ft(chains) = ft;
}

- #define POOL_NEXT_SIZE 0
- static int
- mlx5_chains_get_avail_sz_from_pool(struct mlx5_fs_chains *chains,
- int desired_size)
- {
- int i, found_i = -1;
-
- for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (ft_pool_left(chains)[i] && FT_POOLS[i] > desired_size) {
- found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
- break;
- }
- }
-
- if (found_i != -1) {
- --ft_pool_left(chains)[found_i];
- return FT_POOLS[found_i];
- }
-
- return 0;
- }
-
- static void
- mlx5_chains_put_sz_to_pool(struct mlx5_fs_chains *chains, int sz)
- {
- int i;
-
- for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (sz == FT_POOLS[i]) {
- ++ft_pool_left(chains)[i];
- return;
- }
- }
-
- WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
- }
-
- static void
- mlx5_chains_init_sz_pool(struct mlx5_fs_chains *chains, u32 ft_max)
- {
- int i;
-
- for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
- ft_pool_left(chains)[i] =
- FT_POOLS[i] <= ft_max ? FT_SIZE / FT_POOLS[i] : 0;
- }
-
static struct mlx5_flow_table *
mlx5_chains_create_table(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level)
@@ -221,11 +157,7 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
- mlx5_chains_get_avail_sz_from_pool(chains, FT_TBL_SZ) :
- mlx5_chains_get_avail_sz_from_pool(chains, POOL_NEXT_SIZE);
- if (!sz)
- return ERR_PTR(-ENOSPC);
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
ft_attr.max_fte = sz;

/* We use tc_default_ft(chains) as the table's next_ft till
@@ -266,21 +198,12 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
if (IS_ERR(ft)) {
mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
(int)PTR_ERR(ft), chain, prio, level, sz);
- mlx5_chains_put_sz_to_pool(chains, sz);
return ft;
}

return ft;
}

- static void
- mlx5_chains_destroy_table(struct mlx5_fs_chains *chains,
- struct mlx5_flow_table *ft)
- {
- mlx5_chains_put_sz_to_pool(chains, ft->max_fte);
- mlx5_destroy_flow_table(ft);
- }
-
static int
create_chain_restore(struct fs_chain *chain)
{
@@ -336,9 +259,10 @@ create_chain_restore(struct fs_chain *chain)
MLX5_SET(set_action_in, modact, field,
mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
MLX5_SET(set_action_in, modact, offset,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset * 8);
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset);
MLX5_SET(set_action_in, modact, length,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen * 8);
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen == 32 ?
+ 0 : mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen);
MLX5_SET(set_action_in, modact, data, chain->id);
mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
1, modact);
@@ -636,7 +560,7 @@ err_insert:
err_miss_rule:
mlx5_destroy_flow_group(miss_group);
err_group:
- mlx5_chains_destroy_table(chains, ft);
+ mlx5_destroy_flow_table(ft);
err_create:
err_alloc:
kvfree(prio_s);
@@ -659,7 +583,7 @@ mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
prio_params);
mlx5_del_flow_rules(prio->miss_rule);
mlx5_destroy_flow_group(prio->miss_group);
- mlx5_chains_destroy_table(chains, prio->ft);
+ mlx5_destroy_flow_table(prio->ft);
mlx5_chains_put_chain(chain);
kvfree(prio);
}
@@ -784,7 +708,7 @@ void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft)
{
- mlx5_chains_destroy_table(chains, ft);
+ mlx5_destroy_flow_table(ft);
}

static struct mlx5_fs_chains *
@@ -816,8 +740,6 @@ mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
mlx5_chains_get_chain_range(chains_priv),
mlx5_chains_get_prio_range(chains_priv));

- mlx5_chains_init_sz_pool(chains_priv, attr->max_ft_sz);
-
err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
if (err)
goto init_chains_ht_err;
@@ -1252,7 +1252,6 @@ struct mlx5dr_send_ring {
u32 tx_head;
void *buf;
u32 buf_size;
struct ib_wc wc[MAX_SEND_CQE];
- u8 sync_buff[MIN_READ_SYNC];
struct mlx5dr_mr *sync_mr;
spinlock_t lock; /* Protect the data path of the send ring */
@@ -62,7 +62,7 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,

static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_table *tbl;
@@ -71,7 +71,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,

if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
- log_size,
+ size,
next_ft);
flags = ft->flags;
/* turn off encap/decap if not supported for sw-str by fw */
@@ -97,6 +97,8 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
}
}

+ ft->max_fte = INT_MAX;
+
return 0;
}

@@ -550,6 +550,7 @@ struct mlx5_adev {
int idx;
};

+ struct mlx5_ft_pool;
struct mlx5_priv {
/* IRQ table valid only for real pci devices PF or VF */
struct mlx5_irq_table *irq_table;
@@ -602,6 +603,7 @@ struct mlx5_priv {
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
+ struct mlx5_ft_pool *ft_pool;

struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
@@ -98,10 +98,11 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
u16 vport_num);

/* Reg C1 usage:
- * Reg C1 = < ESW_TUN_ID(12) | ESW_TUN_OPTS(12) | ESW_ZONE_ID(8) >
+ * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
 *
- * Highest 12 bits of reg c1 is the encapsulation tunnel id, next 12 bits is
- * encapsulation tunnel options, and the lowest 8 bits are used for zone id.
+ * Highest bit is reserved for other offloads as marker bit, next 12 bits of reg c1
+ * is the encapsulation tunnel id, next 11 bits is encapsulation tunnel options,
+ * and the lowest 8 bits are used for zone id.
 *
 * Zone id is used to restore CT flow when packet misses on chain.
 *
@@ -109,16 +110,18 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
 * on miss and to support inner header rewrite by means of implicit chain 0
 * flows.
 */
+ #define ESW_RESERVED_BITS 1
#define ESW_ZONE_ID_BITS 8
- #define ESW_TUN_OPTS_BITS 12
+ #define ESW_TUN_OPTS_BITS 11
#define ESW_TUN_ID_BITS 12
#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
- #define ESW_TUN_OPTS_MASK GENMASK(32 - ESW_TUN_ID_BITS - 1, ESW_TUN_OPTS_OFFSET)
- #define ESW_TUN_MASK GENMASK(31, ESW_TUN_OFFSET)
+ #define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
+ #define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
- #define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT 0xFFF /* 0xFFF is a reserved mapping */
+ /* 0x7FF is a reserved mapping */
+ #define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
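To make the new REG_C1 layout concrete, a small user-space check of the masks defined in the hunk above (a sketch under the assumption that GENMASK32 stands in for the kernel's GENMASK). Note that bit 31 is left out of every mask, which is exactly the bit the series reserves for the IPsec offload marker.

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

#define ESW_RESERVED_BITS 1
#define ESW_ZONE_ID_BITS 8
#define ESW_TUN_OPTS_BITS 11
#define ESW_TUN_ID_BITS 12
#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET

int main(void)
{
	uint32_t zone_mask = GENMASK32(ESW_ZONE_ID_BITS - 1, 0);
	uint32_t opts_mask = GENMASK32(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS,
				       ESW_TUN_OPTS_OFFSET);
	uint32_t tun_mask = GENMASK32(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET);

	printf("ESW_ZONE_ID_MASK  = 0x%08x\n", zone_mask); /* 0x000000ff */
	printf("ESW_TUN_OPTS_MASK = 0x%08x\n", opts_mask); /* 0x0007ff00 */
	printf("ESW_TUN_MASK      = 0x%08x\n", tun_mask);  /* 0x7fffff00 */
	return 0;
}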