mlx5-updates-2021-12-28

mlx5 Software steering, New features and optimizations
 
 This patch series brings various SW steering features, optimizations and
 debug-ability focused improvements.
 
  1) Expose debugfs for dumping the SW steering resources
  2) Removing unused fields
  3) support for matching on new fields
  4) steering optimization for RX/TX-only rules
  5) Make Software steering the default steering mechanism when
     available, applies only to Switchdev mode FDB
 
 From Yevgeny Kliteynik and Muhammad Sammar:
 
  - Patch 1 fixes an error flow in creating matchers
  - Patch 2 fix lower case macro prefix "mlx5_" to "MLX5_"
  - Patch 3 removes unused struct member in mlx5dr_matcher
  - Patch 4 renames list field in matcher struct to list_node to reflect the
    fact that is field is for list node that is stored on another struct's lists
  - Patch 5 adds checking for valid Flex parser ID value
  - Patch 6 adds the missing reserved fields to dr_match_param and aligns it to
    the format that is defined by HW spec
  - Patch 7 adds support for dumping SW steering (SMFS) resources using debugfs
    in CSV format: domain and its tables, matchers and rules
  - Patch 8 adds support for a new destination type - UPLINK
  - Patch 9 adds WARN_ON_ONCE on refcount checks in SW steering object destructors
  - Patches 10, 11, 12 add misc5 flow table match parameters and add support for
    matching on tunnel headers 0 and 1
  - Patch 13 adds support for matching on geneve_tlv_option_0_exist field
  - Patch 14 implements performance optimization for for empty or RX/TX-only
    matchers by splitting RX and TX matchers handling: matcher connection in the
    matchers chain is split into two separate lists (RX only and TX only), which
    solves a usecase of many RX or TX only rules that create a long chain of
    RX/TX-only paths w/o the actual rules
  - Patch 15 ignores modify TTL if device doesn't support it instead of
    adding and unsupported action
  - Patch 16 sets SMFS as a default steering mode
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmHOvKsACgkQSD+KveBX
 +j4CpQf8Cc3NkWhGVYYhRBlntdpyVTDpoVhw6RDXVwboIRZ3GcMm81SxgwKrDuUx
 Yhup4K1CNKt44D1RRhX4ElSemfo/afxfGIcq7S87vciUOaebWTIZgRyNvuYr/buI
 v9LIM7zTb1aXL7m3KQHOGc7cucVRvsNjteTxvp/DR0bPFEuzAr5tw9Y5qop9pBbM
 cfgdmXKUoRxA039mqZ2Fl7y+z51zRkfsCza7lEHcGgpvwOLubFEaghj7YTIT6h1m
 +We8w3/+B0mqS0HpByIxmyf/EQBG5Hl7FTbFj1Somn7EFEP4E+LaFzg8nGsUHEeI
 f5027uM7XGlThAUrgdaOMcwBfIuOww==
 =BE8m
 -----END PGP SIGNATURE-----

Merge tag 'mlx5-updates-2021-12-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 Software steering, New features and optimizations

This patch series brings various SW steering features, optimizations and
debug-ability focused improvements.

 1) Expose debugfs for dumping the SW steering resources
 2) Removing unused fields
 3) support for matching on new fields
 4) steering optimization for RX/TX-only rules
 5) Make Software steering the default steering mechanism when
    available, applies only to Switchdev mode FDB

From Yevgeny Kliteynik and Muhammad Sammar:

 - Patch 1 fixes an error flow in creating matchers
 - Patch 2 fixes the lower-case macro prefix "mlx5_" to "MLX5_"
 - Patch 3 removes unused struct member in mlx5dr_matcher
 - Patch 4 renames the list field in the matcher struct to list_node to reflect
   the fact that this field is a list node that is stored on another struct's lists
 - Patch 5 adds checking for valid Flex parser ID value
 - Patch 6 adds the missing reserved fields to dr_match_param and aligns it to
   the format that is defined by HW spec
 - Patch 7 adds support for dumping SW steering (SMFS) resources using debugfs
   in CSV format: domain and its tables, matchers and rules
 - Patch 8 adds support for a new destination type - UPLINK
 - Patch 9 adds WARN_ON_ONCE on refcount checks in SW steering object destructors
 - Patches 10, 11, 12 add misc5 flow table match parameters and add support for
   matching on tunnel headers 0 and 1
 - Patch 13 adds support for matching on geneve_tlv_option_0_exist field
 - Patch 14 implements performance optimization for empty or RX/TX-only
   matchers by splitting RX and TX matchers handling: matcher connection in the
   matchers chain is split into two separate lists (RX only and TX only), which
   solves a usecase of many RX or TX only rules that create a long chain of
   RX/TX-only paths w/o the actual rules
 - Patch 15 ignores modify TTL if device doesn't support it instead of
   adding an unsupported action
 - Patch 16 sets SMFS as a default steering mode
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2021-12-31 14:29:31 +00:00
commit ce2b6eb409
22 changed files with 1334 additions and 290 deletions

View File

@ -104,7 +104,8 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o steering/dr_ste_v1.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
steering/dr_action.o steering/fs_dr.o \
steering/dr_dbg.o
#
# SF device
#

View File

@ -451,7 +451,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
list_for_each_entry(dst, &fte->node.children, node.list) {
if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++;
num_fwd_destinations++;

View File

@ -1525,7 +1525,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2)
{
if (d1->type == d2->type) {
if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
d1->vport.num == d2->vport.num &&
d1->vport.flags == d2->vport.flags &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
@ -3082,6 +3083,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev;
dev->priv.steering = steering;
if (mlx5_fs_dr_is_supported(dev))
steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
sizeof(struct mlx5_flow_group), 0,
0, NULL);

View File

@ -203,7 +203,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn;
};
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_c00
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_e00
/* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last.
*/

View File

@ -1560,6 +1560,12 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
{
return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
}
static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
@ -1591,8 +1597,13 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
if (ret)
return ret;
if (!(*modify_ttl))
*modify_ttl = dr_action_modify_check_is_ttl_modify(sw_action);
if (!(*modify_ttl) &&
dr_action_modify_check_is_ttl_modify(sw_action)) {
if (dr_action_modify_ttl_ignore(dmn))
continue;
*modify_ttl = true;
}
/* Convert SW action to HW action */
ret = dr_action_modify_sw_to_hw(dmn,
@ -1631,7 +1642,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
* modify actions doesn't exceeds the limit
*/
hw_idx++;
if ((num_sw_actions + hw_idx - i) >= max_hw_actions) {
if (hw_idx >= max_hw_actions) {
mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n");
return -EINVAL;
}
@ -1642,6 +1653,10 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
hw_idx++;
}
/* if the resulting HW actions list is empty, add NOP action */
if (!hw_idx)
hw_idx++;
*num_hw_actions = hw_idx;
return 0;
@ -1792,7 +1807,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
int mlx5dr_action_destroy(struct mlx5dr_action *action)
{
if (refcount_read(&action->refcount) > 1)
if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
return -EBUSY;
switch (action->action_type) {

View File

@ -132,6 +132,13 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
/* geneve_tlv_option_0_exist is the indication of
* STE support for lookup type flex_parser_ok
*/
caps->flex_parser_ok_bits_supp =
MLX5_CAP_FLOWTABLE(mdev,
flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
@ -152,7 +159,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->flex_parser_id_mpls_over_gre =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
caps->flex_parser_id_mpls_over_udp =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
@ -599,7 +606,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
for (i = 0; i < fte->dests_size; i++) {
if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++;
num_fwd_destinations++;
@ -724,12 +732,19 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = fte->dest_arr[i].ft_id;
break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
id = fte->dest_arr[i].vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
!!(fte->dest_arr[i].vport.flags &
MLX5_FLOW_DEST_VPORT_VHCA_ID));
if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
id = fte->dest_arr[i].vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
!!(fte->dest_arr[i].vport.flags &
MLX5_FLOW_DEST_VPORT_VHCA_ID));
} else {
id = 0;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid, 1);
}
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
fte->dest_arr[i].vport.vhca_id);

View File

@ -0,0 +1,649 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "dr_types.h"
#define DR_DBG_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_DOMAIN = 3000,
DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER = 3001,
DR_DUMP_REC_TYPE_DOMAIN_INFO_DEV_ATTR = 3002,
DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT = 3003,
DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS = 3004,
DR_DUMP_REC_TYPE_DOMAIN_SEND_RING = 3005,
DR_DUMP_REC_TYPE_TABLE = 3100,
DR_DUMP_REC_TYPE_TABLE_RX = 3101,
DR_DUMP_REC_TYPE_TABLE_TX = 3102,
DR_DUMP_REC_TYPE_MATCHER = 3200,
DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
DR_DUMP_REC_TYPE_RULE = 3300,
DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0 = 3302,
DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 = 3303,
DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1 = 3304,
DR_DUMP_REC_TYPE_ACTION_ENCAP_L2 = 3400,
DR_DUMP_REC_TYPE_ACTION_ENCAP_L3 = 3401,
DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR = 3402,
DR_DUMP_REC_TYPE_ACTION_DROP = 3403,
DR_DUMP_REC_TYPE_ACTION_QP = 3404,
DR_DUMP_REC_TYPE_ACTION_FT = 3405,
DR_DUMP_REC_TYPE_ACTION_CTR = 3406,
DR_DUMP_REC_TYPE_ACTION_TAG = 3407,
DR_DUMP_REC_TYPE_ACTION_VPORT = 3408,
DR_DUMP_REC_TYPE_ACTION_DECAP_L2 = 3409,
DR_DUMP_REC_TYPE_ACTION_DECAP_L3 = 3410,
DR_DUMP_REC_TYPE_ACTION_DEVX_TIR = 3411,
DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN = 3412,
DR_DUMP_REC_TYPE_ACTION_POP_VLAN = 3413,
DR_DUMP_REC_TYPE_ACTION_SAMPLER = 3415,
DR_DUMP_REC_TYPE_ACTION_INSERT_HDR = 3420,
DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421
};
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
{
mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
list_add_tail(&tbl->dbg_node, &tbl->dmn->dbg_tbl_list);
mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
}
void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl)
{
mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
list_del(&tbl->dbg_node);
mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
}
void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule)
{
struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
mutex_lock(&dmn->dump_info.dbg_mutex);
list_add_tail(&rule->dbg_node, &rule->matcher->dbg_rule_list);
mutex_unlock(&dmn->dump_info.dbg_mutex);
}
/* Unregister a rule from its matcher's debugfs dump list. */
void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;

	mutex_lock(&dmn->dump_info.dbg_mutex);
	list_del(&rule->dbg_node);
	mutex_unlock(&dmn->dump_info.dbg_mutex);
}
/* Convert an ICM address to the 32-bit index used by the dump format
 * (ICM is addressed in 64-byte granularity, hence the shift by 6).
 */
static u64 dr_dump_icm_to_idx(u64 icm_addr)
{
	u64 idx = icm_addr >> 6;

	return idx & 0xffffffff;
}
#define DR_HEX_SIZE 256

/* Render @size bytes from @src into @hex as a NUL-terminated hex string.
 * Output needs 2 chars per byte plus a NUL; if that would overflow
 * DR_HEX_SIZE, warn once and truncate the input so the write stays
 * in bounds.
 */
static void
dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size)
{
	if (WARN_ON_ONCE(DR_HEX_SIZE < 2 * size + 1))
		size = DR_HEX_SIZE / 2 - 1; /* truncate */

	bin2hex(hex, src, size);
	hex[2 * size] = 0; /* NULL-terminate */
}
/* Emit one CSV record for a single action attached to a rule.
 *
 * Record shape: <rec_type>,<action_id>,<rule_id>[,type-specific fields...]
 * Action types that carry no extra data (drop, L2 decap, pop vlan) emit
 * only the three common columns.  Action types not listed here are
 * silently skipped.
 *
 * Always returns 0; the int return keeps the signature uniform with the
 * other dump helpers.
 */
static int
dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
			struct mlx5dr_rule_action_member *action_mem)
{
	struct mlx5dr_action *action = action_mem->action;
	const u64 action_id = DR_DBG_PTR_TO_ID(action);

	switch (action->action_type) {
	case DR_ACTION_TYP_DROP:
		seq_printf(file, "%d,0x%llx,0x%llx\n",
			   DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id);
		break;
	case DR_ACTION_TYP_FT:
		/* FW-managed and SW-managed destination tables keep their
		 * table ID in different places.
		 */
		if (action->dest_tbl->is_fw_tbl)
			seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
				   DR_DUMP_REC_TYPE_ACTION_FT, action_id,
				   rule_id, action->dest_tbl->fw_tbl.id);
		else
			seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
				   DR_DUMP_REC_TYPE_ACTION_FT, action_id,
				   rule_id, action->dest_tbl->tbl->table_id);
		break;
	case DR_ACTION_TYP_CTR:
		/* Counter ID is the base ID plus the offset inside the bulk. */
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
			   action->ctr->ctr_id + action->ctr->offset);
		break;
	case DR_ACTION_TYP_TAG:
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
			   action->flow_tag->flow_tag);
		break;
	case DR_ACTION_TYP_MODIFY_HDR:
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
			   rule_id, action->rewrite->index);
		break;
	case DR_ACTION_TYP_VPORT:
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
			   action->vport->caps->num);
		break;
	case DR_ACTION_TYP_TNL_L2_TO_L2:
		seq_printf(file, "%d,0x%llx,0x%llx\n",
			   DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
			   rule_id);
		break;
	case DR_ACTION_TYP_TNL_L3_TO_L2:
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
			   rule_id, action->rewrite->index);
		break;
	case DR_ACTION_TYP_L2_TO_TNL_L2:
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
			   rule_id, action->reformat->id);
		break;
	case DR_ACTION_TYP_L2_TO_TNL_L3:
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
			   rule_id, action->reformat->id);
		break;
	case DR_ACTION_TYP_POP_VLAN:
		seq_printf(file, "%d,0x%llx,0x%llx\n",
			   DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
			   rule_id);
		break;
	case DR_ACTION_TYP_PUSH_VLAN:
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
			   rule_id, action->push_vlan->vlan_hdr);
		break;
	case DR_ACTION_TYP_INSERT_HDR:
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
			   rule_id, action->reformat->id,
			   action->reformat->param_0,
			   action->reformat->param_1);
		break;
	case DR_ACTION_TYP_REMOVE_HDR:
		seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
			   DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
			   rule_id, action->reformat->id,
			   action->reformat->param_0,
			   action->reformat->param_1);
		break;
	case DR_ACTION_TYP_SAMPLER:
		/* The two literal zeros are placeholder columns in the
		 * sampler record format.
		 */
		seq_printf(file,
			   "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
			   DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id,
			   0, 0, action->sampler->sampler_id,
			   action->sampler->rx_icm_addr,
			   action->sampler->tx_icm_addr);
		break;
	default:
		return 0;
	}

	return 0;
}
/* Emit one STE record of a rule: its ICM index and a hex dump of the
 * (reduced) HW STE contents.
 */
static int
dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
		 bool is_rx, const u64 rule_id, u8 format_ver)
{
	char hw_ste_dump[DR_HEX_SIZE];
	u32 mem_rec_type;

	/* Record type encodes both direction (RX/TX) and STE format version. */
	if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5)
		mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
				       DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0;
	else
		mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 :
				       DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;

	dr_dump_hex_print(hw_ste_dump, (char *)ste->hw_ste,
			  DR_STE_SIZE_REDUCED);

	seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
		   dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
		   hw_ste_dump);

	return 0;
}
/* Dump one direction (RX or TX) of a rule: walk the rule's STE chain
 * from first to last and emit a record per STE.
 */
static int
dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
		   bool is_rx, const u64 rule_id, u8 format_ver)
{
	struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
	struct mlx5dr_ste *last_ste = rule_rx_tx->last_rule_ste;
	int rc, idx;

	/* Collect the STEs in reverse order; nothing to dump on failure. */
	if (mlx5dr_rule_get_reverse_rule_members(ste_arr, last_ste, &idx))
		return 0;

	for (idx--; idx >= 0; idx--) {
		rc = dr_dump_rule_mem(file, ste_arr[idx], is_rx, rule_id,
				      format_ver);
		if (rc < 0)
			return rc;
	}

	return 0;
}
/* Emit the rule record, then the rule's STE chain for each direction it
 * is installed on (RX and/or TX), then one record per attached action.
 */
static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_action_member *action_mem;
	const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
	struct mlx5dr_rule_rx_tx *rx = &rule->rx;
	struct mlx5dr_rule_rx_tx *tx = &rule->tx;
	u8 format_ver;
	int ret;

	/* STE format version decides which record types the STEs use. */
	format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;

	seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id,
		   DR_DBG_PTR_TO_ID(rule->matcher));

	/* A nic_matcher is only set for directions the rule exists on. */
	if (rx->nic_matcher) {
		ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
		if (ret < 0)
			return ret;
	}

	if (tx->nic_matcher) {
		ret = dr_dump_rule_rx_tx(file, tx, false, rule_id, format_ver);
		if (ret < 0)
			return ret;
	}

	list_for_each_entry(action_mem, &rule->rule_actions_list, list) {
		ret = dr_dump_rule_action_mem(file, rule_id, action_mem);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static int
dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
char dump[DR_HEX_SIZE];
seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK,
matcher_id);
if (criteria & DR_MATCHER_CRITERIA_OUTER) {
dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
seq_printf(file, "%s,", dump);
} else {
seq_puts(file, ",");
}
if (criteria & DR_MATCHER_CRITERIA_INNER) {
dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
seq_printf(file, "%s,", dump);
} else {
seq_puts(file, ",");
}
if (criteria & DR_MATCHER_CRITERIA_MISC) {
dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
seq_printf(file, "%s,", dump);
} else {
seq_puts(file, ",");
}
if (criteria & DR_MATCHER_CRITERIA_MISC2) {
dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
seq_printf(file, "%s,", dump);
} else {
seq_puts(file, ",");
}
if (criteria & DR_MATCHER_CRITERIA_MISC3) {
dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
seq_printf(file, "%s\n", dump);
} else {
seq_puts(file, ",\n");
}
return 0;
}
/* Emit one STE-builder record: position in the builder array, direction
 * it serves, and the STE lookup type it programs.
 */
static int
dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
			u32 index, bool is_rx, const u64 matcher_id)
{
	seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n",
		   DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx,
		   builder->lu_type);

	return 0;
}
/* Emit the per-direction matcher record — start hash-table and end-anchor
 * ICM indices — followed by one record per STE builder.
 */
static int
dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
		      struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
		      const u64 matcher_id)
{
	enum dr_dump_rec_type rec_type;
	int i, ret;

	rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
			   DR_DUMP_REC_TYPE_MATCHER_TX;

	seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
		   rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
		   matcher_id, matcher_rx_tx->num_of_builders,
		   dr_dump_icm_to_idx(matcher_rx_tx->s_htbl->chunk->icm_addr),
		   dr_dump_icm_to_idx(matcher_rx_tx->e_anchor->chunk->icm_addr));

	for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
		ret = dr_dump_matcher_builder(file,
					      &matcher_rx_tx->ste_builder[i],
					      i, is_rx, matcher_id);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Emit the matcher record, its mask record, and the per-direction
 * (RX/TX) matcher records for whichever directions exist.
 */
static int
dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
	struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
	struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
	u64 matcher_id;
	int ret;

	matcher_id = DR_DBG_PTR_TO_ID(matcher);

	seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
		   matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio);

	ret = dr_dump_matcher_mask(file, &matcher->mask,
				   matcher->match_criteria, matcher_id);
	if (ret < 0)
		return ret;

	/* nic_tbl is only set for directions this matcher is attached to. */
	if (rx->nic_tbl) {
		ret = dr_dump_matcher_rx_tx(file, true, rx, matcher_id);
		if (ret < 0)
			return ret;
	}

	if (tx->nic_tbl) {
		ret = dr_dump_matcher_rx_tx(file, false, tx, matcher_id);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Dump a matcher and all rules registered under it.
 * Caller must hold the domain's dump_info.dbg_mutex so dbg_rule_list is
 * stable during the walk (see dr_dump_domain_all()).
 */
static int
dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
	struct mlx5dr_rule *rule;
	int ret;

	ret = dr_dump_matcher(file, matcher);
	if (ret < 0)
		return ret;

	list_for_each_entry(rule, &matcher->dbg_rule_list, dbg_node) {
		ret = dr_dump_rule(file, rule);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Emit the per-direction table record: the start-anchor ICM index. */
static int
dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
		    struct mlx5dr_table_rx_tx *table_rx_tx,
		    const u64 table_id)
{
	enum dr_dump_rec_type rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
						 DR_DUMP_REC_TYPE_TABLE_TX;
	u64 anchor_idx;

	anchor_idx = dr_dump_icm_to_idx(table_rx_tx->s_anchor->chunk->icm_addr);
	seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id, anchor_idx);

	return 0;
}
/* Emit the table record, then its RX and/or TX nic-table records
 * (a table may be RX-only, TX-only, or both, depending on domain type).
 */
static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
	struct mlx5dr_table_rx_tx *rx = &table->rx;
	struct mlx5dr_table_rx_tx *tx = &table->tx;
	int ret;

	seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
		   DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
		   table->table_type, table->level);

	if (rx->nic_dmn) {
		ret = dr_dump_table_rx_tx(file, true, rx,
					  DR_DBG_PTR_TO_ID(table));
		if (ret < 0)
			return ret;
	}

	if (tx->nic_dmn) {
		ret = dr_dump_table_rx_tx(file, false, tx,
					  DR_DBG_PTR_TO_ID(table));
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Dump a table and every matcher (with its rules) chained under it.
 * Caller must hold dbg_mutex and the domain lock (see dr_dump_domain_all()).
 */
static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl)
{
	struct mlx5dr_matcher *matcher;
	int ret;

	ret = dr_dump_table(file, tbl);
	if (ret < 0)
		return ret;

	list_for_each_entry(matcher, &tbl->matcher_list, list_node) {
		ret = dr_dump_matcher_all(file, matcher);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Emit the send-ring record: the CQ and QP numbers of the SW steering
 * communication channel used to write STEs to the device.
 */
static int
dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
		  const u64 domain_id)
{
	seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
		   DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring),
		   domain_id, ring->cq->mcq.cqn, ring->qp->qpn);

	return 0;
}
/* Emit one flex-parser capability record: the parser's symbolic name and
 * its device-assigned parser ID.
 */
static int
dr_dump_domain_info_flex_parser(struct seq_file *file,
				const char *flex_parser_name,
				const u8 flex_parser_value,
				const u64 domain_id)
{
	seq_printf(file, "%d,0x%llx,%s,0x%x\n",
		   DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
		   flex_parser_name, flex_parser_value);

	return 0;
}
/* Emit the domain capabilities record followed by one record per vport. */
static int
dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
			 const u64 domain_id)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	unsigned long i, vports_num;

	/* NOTE(review): vports_num is the index left behind by the last
	 * iteration, not a running count — this matches the original
	 * behavior; confirm the dump consumer expects it this way.
	 */
	xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
		; /* count the number of vports in xarray */

	seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
		   DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
		   caps->nic_rx_drop_address, caps->nic_tx_drop_address,
		   caps->flex_protocols, vports_num, caps->eswitch_manager);

	/* xa_for_each() already yields the entry in vport_caps; the original
	 * re-fetched it with a redundant xa_load(), removed here.
	 */
	xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
		seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
			   DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i,
			   vport_caps->vport_gvmi, vport_caps->icm_address_rx,
			   vport_caps->icm_address_tx);
	}

	return 0;
}
static int
dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
const u64 domain_id)
{
int ret;
ret = dr_dump_domain_info_caps(file, &info->caps, domain_id);
if (ret < 0)
return ret;
ret = dr_dump_domain_info_flex_parser(file, "icmp_dw0",
info->caps.flex_parser_id_icmp_dw0,
domain_id);
if (ret < 0)
return ret;
ret = dr_dump_domain_info_flex_parser(file, "icmp_dw1",
info->caps.flex_parser_id_icmp_dw1,
domain_id);
if (ret < 0)
return ret;
ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw0",
info->caps.flex_parser_id_icmpv6_dw0,
domain_id);
if (ret < 0)
return ret;
ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw1",
info->caps.flex_parser_id_icmpv6_dw1,
domain_id);
if (ret < 0)
return ret;
return 0;
}
/* Emit the domain record, its info records, and — when SW steering is
 * supported — the send-ring record.
 */
static int
dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
{
	u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
	int ret;

	/* gvmi format fixed from "0%x" to "0x%x": the original dropped the
	 * "0x" prefix that every other hex field in this dump uses —
	 * presumed typo.
	 */
	seq_printf(file, "%d,0x%llx,%d,0x%x,%d,%s\n", DR_DUMP_REC_TYPE_DOMAIN,
		   domain_id, dmn->type, dmn->info.caps.gvmi,
		   dmn->info.supp_sw_steering, pci_name(dmn->mdev->pdev));

	ret = dr_dump_domain_info(file, &dmn->info, domain_id);
	if (ret < 0)
		return ret;

	if (dmn->info.supp_sw_steering) {
		ret = dr_dump_send_ring(file, dmn->send_ring, domain_id);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Dump the whole domain: its record and caps, then every registered table
 * with all matchers and rules.
 *
 * Holds dbg_mutex (stabilizes the dbg lists against add/del) and the
 * domain lock (stabilizes the steering objects) for the whole dump.
 */
static int dr_dump_domain_all(struct seq_file *file, struct mlx5dr_domain *dmn)
{
	struct mlx5dr_table *tbl;
	int ret;

	mutex_lock(&dmn->dump_info.dbg_mutex);
	mlx5dr_domain_lock(dmn);

	ret = dr_dump_domain(file, dmn);
	if (ret < 0)
		goto unlock_mutex;

	list_for_each_entry(tbl, &dmn->dbg_tbl_list, dbg_node) {
		ret = dr_dump_table_all(file, tbl);
		if (ret < 0)
			break;
	}

unlock_mutex:
	mlx5dr_domain_unlock(dmn);
	mutex_unlock(&dmn->dump_info.dbg_mutex);

	return ret;
}
/* seq_file show callback; file->private is the mlx5dr_domain passed to
 * debugfs_create_file() in mlx5dr_dbg_init_dump().
 */
static int dr_dump_show(struct seq_file *file, void *priv)
{
	return dr_dump_domain_all(file, file->private);
}
/* Generates dr_dump_fops for the debugfs file below. */
DEFINE_SHOW_ATTRIBUTE(dr_dump);
/* Create the per-domain debugfs dump file:
 * <dbg_root>/steering/fdb/dmn_<ptr>, readable via dr_dump_fops.
 * Only FDB domains are supported; other domain types just get a warning.
 */
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
{
	struct mlx5_core_dev *dev = dmn->mdev;
	char file_name[128];

	if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
		mlx5_core_warn(dev,
			       "Steering dump is not supported for NIC RX/TX domains\n");
		return;
	}

	/* Initialize everything the dump path dereferences BEFORE the file
	 * becomes visible: debugfs_create_file() publishes the file
	 * immediately, so the original ordering (init after create) let a
	 * racing open hit an uninitialized mutex/list.
	 */
	INIT_LIST_HEAD(&dmn->dbg_tbl_list);
	mutex_init(&dmn->dump_info.dbg_mutex);

	dmn->dump_info.steering_debugfs =
		debugfs_create_dir("steering", dev->priv.dbg_root);
	dmn->dump_info.fdb_debugfs =
		debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs);

	/* snprintf keeps the write into file_name provably bounded
	 * (a "%p" always fits, but don't rely on it).
	 */
	snprintf(file_name, sizeof(file_name), "dmn_%p", dmn);
	debugfs_create_file(file_name, 0444, dmn->dump_info.fdb_debugfs,
			    dmn, &dr_dump_fops);
}
/* Tear down the domain's debugfs tree (recursively removes the fdb dir
 * and dump file; a NULL dentry is a no-op for non-FDB domains) and
 * destroy the dump mutex.
 */
void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn)
{
	debugfs_remove_recursive(dmn->dump_info.steering_debugfs);
	mutex_destroy(&dmn->dump_info.dbg_mutex);
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef MLX5DR_DBG_H
#define MLX5DR_DBG_H

/* Per-domain debugfs dump state.
 * dbg_mutex protects the dump lists (the domain's table list and each
 * matcher's rule list) against concurrent add/del/dump.
 */
struct mlx5dr_dbg_dump_info {
	struct mutex dbg_mutex; /* protect dbg lists */
	struct dentry *steering_debugfs;
	struct dentry *fdb_debugfs;
};

/* Create/remove the per-domain debugfs dump file. */
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn);

/* Register/unregister steering objects on the dump lists. */
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl);
void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl);
void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule);
void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule);

#endif /* MLX5DR_DBG_H */

View File

@ -395,7 +395,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
}
dr_domain_init_csum_recalc_fts(dmn);
mlx5dr_dbg_init_dump(dmn);
return dmn;
uninit_caps:
@ -431,11 +431,12 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
if (refcount_read(&dmn->refcount) > 1)
if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
return -EBUSY;
/* make sure resources are not used by the hardware */
mlx5dr_cmd_sync_steering(dmn->mdev);
mlx5dr_dbg_uninit_dump(dmn);
dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);

View File

@ -140,6 +140,19 @@ static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
return misc3->geneve_tlv_option_0_data;
}
static bool
dr_matcher_supp_flex_parser_ok(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_parser_ok_bits_supp;
}
static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *misc,
struct mlx5dr_domain *dmn)
{
return dr_matcher_supp_flex_parser_ok(&dmn->info.caps) &&
misc->geneve_tlv_option_0_exist;
}
static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
@ -359,7 +372,7 @@ static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
}
static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
@ -368,6 +381,12 @@ static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
}
static bool dr_mask_is_tnl_header_0_1_set(struct mlx5dr_match_misc5 *misc5)
{
return misc5->tunnel_header_0 || misc5->tunnel_header_1;
}
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,
@ -424,6 +443,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
mask.misc4 = matcher->mask.misc4;
if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC5)
mask.misc5 = matcher->mask.misc5;
ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
&matcher->mask, NULL);
if (ret)
@ -443,7 +465,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
DR_MATCHER_CRITERIA_MISC |
DR_MATCHER_CRITERIA_MISC2 |
DR_MATCHER_CRITERIA_MISC3)) {
DR_MATCHER_CRITERIA_MISC3 |
DR_MATCHER_CRITERIA_MISC5)) {
inner = false;
if (dr_mask_is_wqe_metadata_set(&mask.misc2))
@ -511,6 +534,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
if (dr_mask_is_tnl_geneve_tlv_opt_exist_set(&mask.misc, dmn))
mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
} else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
@ -525,6 +552,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_tnl_gtpu(&mask, dmn))
mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
&mask, inner, rx);
} else if (dr_mask_is_tnl_header_0_1_set(&mask.misc5)) {
mlx5dr_ste_build_tnl_header_0_1(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
@ -653,10 +683,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
return 0;
}
static int dr_matcher_connect(struct mlx5dr_domain *dmn,
struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
struct mlx5dr_matcher_rx_tx *next_nic_matcher,
struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
struct mlx5dr_matcher_rx_tx *next_nic_matcher,
struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
@ -712,58 +742,50 @@ static int dr_matcher_connect(struct mlx5dr_domain *dmn,
return 0;
}
static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_matcher_rx_tx *nic_matcher)
{
struct mlx5dr_matcher *next_matcher, *prev_matcher, *tmp_matcher;
struct mlx5dr_table *tbl = matcher->tbl;
struct mlx5dr_domain *dmn = tbl->dmn;
struct mlx5dr_matcher_rx_tx *next_nic_matcher, *prev_nic_matcher, *tmp_nic_matcher;
struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
bool first = true;
int ret;
next_matcher = NULL;
list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
if (tmp_matcher->prio >= matcher->prio) {
next_matcher = tmp_matcher;
/* If the nic matcher is already on its parent nic table list,
* then it is already connected to the chain of nic matchers.
*/
if (!list_empty(&nic_matcher->list_node))
return 0;
next_nic_matcher = NULL;
list_for_each_entry(tmp_nic_matcher, &nic_tbl->nic_matcher_list, list_node) {
if (tmp_nic_matcher->prio >= nic_matcher->prio) {
next_nic_matcher = tmp_nic_matcher;
break;
}
first = false;
}
prev_matcher = NULL;
if (next_matcher && !first)
prev_matcher = list_prev_entry(next_matcher, matcher_list);
prev_nic_matcher = NULL;
if (next_nic_matcher && !first)
prev_nic_matcher = list_prev_entry(next_nic_matcher, list_node);
else if (!first)
prev_matcher = list_last_entry(&tbl->matcher_list,
struct mlx5dr_matcher,
matcher_list);
prev_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
struct mlx5dr_matcher_rx_tx,
list_node);
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
ret = dr_matcher_connect(dmn, &matcher->rx,
next_matcher ? &next_matcher->rx : NULL,
prev_matcher ? &prev_matcher->rx : NULL);
if (ret)
return ret;
}
ret = dr_nic_matcher_connect(dmn, nic_matcher,
next_nic_matcher, prev_nic_matcher);
if (ret)
return ret;
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
ret = dr_matcher_connect(dmn, &matcher->tx,
next_matcher ? &next_matcher->tx : NULL,
prev_matcher ? &prev_matcher->tx : NULL);
if (ret)
return ret;
}
if (prev_matcher)
list_add(&matcher->matcher_list, &prev_matcher->matcher_list);
else if (next_matcher)
list_add_tail(&matcher->matcher_list,
&next_matcher->matcher_list);
if (prev_nic_matcher)
list_add(&nic_matcher->list_node, &prev_nic_matcher->list_node);
else if (next_nic_matcher)
list_add_tail(&nic_matcher->list_node, &next_nic_matcher->list_node);
else
list_add(&matcher->matcher_list, &tbl->matcher_list);
list_add(&nic_matcher->list_node, &nic_matcher->nic_tbl->nic_matcher_list);
return 0;
return ret;
}
static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
@ -822,6 +844,9 @@ static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
int ret;
nic_matcher->prio = matcher->prio;
INIT_LIST_HEAD(&nic_matcher->list_node);
ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
if (ret)
return ret;
@ -872,13 +897,12 @@ uninit_nic_rx:
return ret;
}
static int dr_matcher_init(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *mask)
static int dr_matcher_copy_param(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *mask)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_parameters consumed_mask;
struct mlx5dr_table *tbl = matcher->tbl;
struct mlx5dr_domain *dmn = tbl->dmn;
int i, ret;
int i, ret = 0;
if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
mlx5dr_err(dmn, "Invalid match criteria attribute\n");
@ -898,10 +922,36 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
consumed_mask.match_sz = mask->match_sz;
memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
mlx5dr_ste_copy_param(matcher->match_criteria,
&matcher->mask, &consumed_mask,
true);
&matcher->mask, &consumed_mask, true);
/* Check that all mask data was consumed */
for (i = 0; i < consumed_mask.match_sz; i++) {
if (!((u8 *)consumed_mask.match_buf)[i])
continue;
mlx5dr_dbg(dmn,
"Match param mask contains unsupported parameters\n");
ret = -EOPNOTSUPP;
break;
}
kfree(consumed_mask.match_buf);
}
return ret;
}
static int dr_matcher_init(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *mask)
{
struct mlx5dr_table *tbl = matcher->tbl;
struct mlx5dr_domain *dmn = tbl->dmn;
int ret;
ret = dr_matcher_copy_param(matcher, mask);
if (ret)
return ret;
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
matcher->rx.nic_tbl = &tbl->rx;
@ -919,25 +969,25 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
default:
WARN_ON(true);
ret = -EINVAL;
goto free_consumed_mask;
}
/* Check that all mask data was consumed */
for (i = 0; i < consumed_mask.match_sz; i++) {
if (!((u8 *)consumed_mask.match_buf)[i])
continue;
mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
ret = -EOPNOTSUPP;
goto free_consumed_mask;
}
ret = 0;
free_consumed_mask:
kfree(consumed_mask.match_buf);
return ret;
}
static void dr_matcher_add_to_dbg_list(struct mlx5dr_matcher *matcher)
{
mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
list_add(&matcher->list_node, &matcher->tbl->matcher_list);
mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
}
static void dr_matcher_remove_from_dbg_list(struct mlx5dr_matcher *matcher)
{
mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
list_del(&matcher->list_node);
mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
}
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *tbl,
u32 priority,
@ -957,7 +1007,8 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
matcher->prio = priority;
matcher->match_criteria = match_criteria_enable;
refcount_set(&matcher->refcount, 1);
INIT_LIST_HEAD(&matcher->matcher_list);
INIT_LIST_HEAD(&matcher->list_node);
INIT_LIST_HEAD(&matcher->dbg_rule_list);
mlx5dr_domain_lock(tbl->dmn);
@ -965,16 +1016,12 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
if (ret)
goto free_matcher;
ret = dr_matcher_add_to_tbl(matcher);
if (ret)
goto matcher_uninit;
dr_matcher_add_to_dbg_list(matcher);
mlx5dr_domain_unlock(tbl->dmn);
return matcher;
matcher_uninit:
dr_matcher_uninit(matcher);
free_matcher:
mlx5dr_domain_unlock(tbl->dmn);
kfree(matcher);
@ -983,10 +1030,10 @@ dec_ref:
return NULL;
}
static int dr_matcher_disconnect(struct mlx5dr_domain *dmn,
struct mlx5dr_table_rx_tx *nic_tbl,
struct mlx5dr_matcher_rx_tx *next_nic_matcher,
struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_table_rx_tx *nic_tbl,
struct mlx5dr_matcher_rx_tx *next_nic_matcher,
struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
struct mlx5dr_htbl_connect_info info;
@ -1013,43 +1060,34 @@ static int dr_matcher_disconnect(struct mlx5dr_domain *dmn,
&info, true);
}
static int dr_matcher_remove_from_tbl(struct mlx5dr_matcher *matcher)
int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_matcher_rx_tx *nic_matcher)
{
struct mlx5dr_matcher *prev_matcher, *next_matcher;
struct mlx5dr_table *tbl = matcher->tbl;
struct mlx5dr_domain *dmn = tbl->dmn;
int ret = 0;
struct mlx5dr_matcher_rx_tx *prev_nic_matcher, *next_nic_matcher;
struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
int ret;
if (list_is_last(&matcher->matcher_list, &tbl->matcher_list))
next_matcher = NULL;
/* If the nic matcher is not on its parent nic table list,
* then it is detached - no need to disconnect it.
*/
if (list_empty(&nic_matcher->list_node))
return 0;
if (list_is_last(&nic_matcher->list_node, &nic_tbl->nic_matcher_list))
next_nic_matcher = NULL;
else
next_matcher = list_next_entry(matcher, matcher_list);
next_nic_matcher = list_next_entry(nic_matcher, list_node);
if (matcher->matcher_list.prev == &tbl->matcher_list)
prev_matcher = NULL;
if (nic_matcher->list_node.prev == &nic_tbl->nic_matcher_list)
prev_nic_matcher = NULL;
else
prev_matcher = list_prev_entry(matcher, matcher_list);
prev_nic_matcher = list_prev_entry(nic_matcher, list_node);
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
ret = dr_matcher_disconnect(dmn, &tbl->rx,
next_matcher ? &next_matcher->rx : NULL,
prev_matcher ? &prev_matcher->rx : NULL);
if (ret)
return ret;
}
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
ret = dr_matcher_disconnect(dmn, &tbl->tx,
next_matcher ? &next_matcher->tx : NULL,
prev_matcher ? &prev_matcher->tx : NULL);
if (ret)
return ret;
}
list_del(&matcher->matcher_list);
ret = dr_matcher_disconnect_nic(dmn, nic_tbl, next_nic_matcher, prev_nic_matcher);
if (ret)
return ret;
list_del_init(&nic_matcher->list_node);
return 0;
}
@ -1057,12 +1095,12 @@ int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
{
struct mlx5dr_table *tbl = matcher->tbl;
if (refcount_read(&matcher->refcount) > 1)
if (WARN_ON_ONCE(refcount_read(&matcher->refcount) > 1))
return -EBUSY;
mlx5dr_domain_lock(tbl->dmn);
dr_matcher_remove_from_tbl(matcher);
dr_matcher_remove_from_dbg_list(matcher);
dr_matcher_uninit(matcher);
refcount_dec(&matcher->tbl->refcount);

View File

@ -5,11 +5,6 @@
#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
struct mlx5dr_rule_action_member {
struct mlx5dr_action *action;
struct list_head list;
};
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
@ -979,14 +974,36 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
return false;
}
}
if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
s_idx = offsetof(struct mlx5dr_match_param, misc5);
e_idx = min(s_idx + sizeof(param->misc5), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contains a value not specified by mask\n");
return false;
}
}
return true;
}
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule)
{
/* Check if this nic rule was actually created, or was it skipped
* and only the other type of the RX/TX nic rule was created.
*/
if (!nic_rule->last_rule_ste)
return 0;
mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
dr_rule_clean_rule_members(rule, nic_rule);
nic_rule->nic_matcher->rules--;
if (!nic_rule->nic_matcher->rules)
mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
nic_rule->nic_matcher);
mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
return 0;
@ -1003,6 +1020,8 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
mlx5dr_dbg_rule_del(rule);
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
dr_rule_destroy_rule_nic(rule, &rule->rx);
@ -1091,24 +1110,28 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_lock(nic_dmn);
ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
if (ret)
goto free_hw_ste;
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
goto free_hw_ste;
goto remove_from_nic_tbl;
/* Set the tag values inside the ste array */
ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
if (ret)
goto free_hw_ste;
goto remove_from_nic_tbl;
/* Set the actions values/addresses inside the ste array */
ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
num_actions, hw_ste_arr,
&new_hw_ste_arr_sz);
if (ret)
goto free_hw_ste;
goto remove_from_nic_tbl;
cur_htbl = nic_matcher->s_htbl;
@ -1155,6 +1178,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (htbl)
mlx5dr_htbl_put(htbl);
nic_matcher->rules++;
mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
@ -1168,6 +1193,10 @@ free_rule:
list_del(&ste_info->send_list);
kfree(ste_info);
}
remove_from_nic_tbl:
mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
@ -1257,6 +1286,8 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
if (ret)
goto remove_action_members;
INIT_LIST_HEAD(&rule->dbg_node);
mlx5dr_dbg_rule_add(rule);
return rule;
remove_action_members:

View File

@ -719,6 +719,8 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bo
spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);
spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
spec->geneve_tlv_option_0_exist =
IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);
spec->outer_ipv6_flow_label =
@ -880,6 +882,26 @@ static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec,
IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
/* Copy the misc5 match fields (MACsec tags, tunnel headers 0-3) from the
 * fte_match_set_misc5 mask layout into the SW steering match param.
 * When @clr is set, IFC_GET_CLR also clears each consumed field in @mask.
 */
static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
{
	spec->macsec_tag_0 = IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
	spec->macsec_tag_1 = IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
	spec->macsec_tag_2 = IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
	spec->macsec_tag_3 = IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
	spec->tunnel_header_0 = IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
	spec->tunnel_header_1 = IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
	spec->tunnel_header_2 = IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
	spec->tunnel_header_3 = IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
}
void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
struct mlx5dr_match_parameters *mask,
@ -966,6 +988,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
}
param_location += sizeof(struct mlx5dr_match_misc4);
if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
if (mask->match_sz < param_location +
sizeof(struct mlx5dr_match_misc5)) {
memcpy(tail_param, data + param_location,
mask->match_sz - param_location);
buff = tail_param;
} else {
buff = data + param_location;
}
dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
}
}
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
@ -1180,6 +1216,21 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}
/* Set up an STE builder matching on geneve_tlv_option_0_exist.
 * Quietly does nothing on STE contexts that don't provide this
 * builder (the callback pointer is optional per STE version).
 */
void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
					       struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask,
					       struct mlx5dr_cmd_caps *caps,
					       bool inner, bool rx)
{
	/* Not all STE versions support this match - bail quietly */
	if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
		return;

	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
}
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@ -1269,6 +1320,16 @@ void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_flex_parser_1_init(sb, mask);
}
/* Set up an STE builder matching on misc5 tunnel_header_0/1 by
 * dispatching to the STE-version-specific init callback.
 */
void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_header_0_1_init(sb, mask);
}
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,

View File

@ -135,12 +135,14 @@ struct mlx5dr_ste_ctx {
void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
void DR_STE_CTX_BUILDER(tnl_geneve);
void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt_exist);
void DR_STE_CTX_BUILDER(register_0);
void DR_STE_CTX_BUILDER(register_1);
void DR_STE_CTX_BUILDER(src_gvmi_qpn);
void DR_STE_CTX_BUILDER(flex_parser_0);
void DR_STE_CTX_BUILDER(flex_parser_1);
void DR_STE_CTX_BUILDER(tnl_gtpu);
void DR_STE_CTX_BUILDER(tnl_header_0_1);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);

View File

@ -80,6 +80,7 @@ enum {
DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
DR_STE_V0_LU_TYPE_TUNNEL_HEADER = 0x34,
DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
};
@ -1704,7 +1705,7 @@ static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
u32 id = *misc4_field_id;
u8 *parser_ptr;
if (parser_is_used[id])
if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
return;
parser_is_used[id] = true;
@ -1875,6 +1876,27 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
}
/* Build the STEv0 tag for matching on misc5 tunnel_header_0/1.
 * DR_STE_SET_TAG copies each masked field into the HW tag and clears
 * the consumed field in @value. Always returns 0.
 */
static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
return 0;
}
/* Initialize the STEv0 builder for tunnel_header_0/1 matching: set the
 * HW lookup type, derive the bit/byte masks from the match mask, and
 * install the per-rule tag-building callback.
 */
static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V0_LU_TYPE_TUNNEL_HEADER;
/* populates sb->bit_mask, which must happen before the byte-mask conversion */
dr_ste_v0_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
}
struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
@ -1903,6 +1925,7 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
.build_tnl_header_0_1_init = &dr_ste_v0_build_tnl_header_0_1_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,

View File

@ -47,6 +47,7 @@ enum {
DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
@ -1713,6 +1714,27 @@ dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
}
/* Build the STEv1 tag for matching on misc5 tunnel_header_0/1.
 * DR_STE_SET_TAG copies each masked field into the HW tag and clears
 * the consumed field in @value. Always returns 0.
 */
static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
return 0;
}
/* Initialize the STEv1 builder for tunnel_header_0/1 matching: set the
 * HW lookup type, derive the bit/byte masks from the match mask, and
 * install the per-rule tag-building callback.
 */
static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
/* populates sb->bit_mask, which must happen before the byte-mask conversion */
dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
}
static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
@ -1833,7 +1855,7 @@ static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
u32 id = *misc4_field_id;
u8 *parser_ptr;
if (parser_is_used[id])
if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
return;
parser_is_used[id] = true;
@ -1921,6 +1943,32 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
}
/* Build the STEv1 tag for the geneve_tlv_option_0_exist match.
 * If the exist bit is requested, set the corresponding "flex parser OK"
 * bit in the tag (parser ID taken from device caps) and zero the
 * consumed field in @value so the leftover-mask check doesn't flag it.
 * Always returns 0.
 */
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
struct mlx5dr_match_misc *misc = &value->misc;
if (misc->geneve_tlv_option_0_exist) {
MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
misc->geneve_tlv_option_0_exist = 0;
}
return 0;
}
/* Initialize the STEv1 builder for geneve_tlv_option_0_exist matching:
 * use the FLEX_PARSER_OK lookup type, derive bit/byte masks from the
 * match mask, and install the per-rule tag-building callback.
 */
static void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
/* populates sb->bit_mask, which must happen before the byte-mask conversion */
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
}
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
@ -2020,12 +2068,14 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
.build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
.build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
.build_register_0_init = &dr_ste_v1_build_register_0_init,
.build_register_1_init = &dr_ste_v1_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
.build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
.build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,

View File

@ -3,12 +3,47 @@
#include "dr_types.h"
static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_table_rx_tx *nic_tbl,
struct mlx5dr_action *action)
{
struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *last_htbl;
int ret;
if (!list_empty(&nic_tbl->nic_matcher_list))
last_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
struct mlx5dr_matcher_rx_tx,
list_node);
if (last_nic_matcher)
last_htbl = last_nic_matcher->e_anchor;
else
last_htbl = nic_tbl->s_anchor;
if (action)
nic_tbl->default_icm_addr =
nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
else
nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_tbl->default_icm_addr;
ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_tbl->nic_dmn,
last_htbl, &info, true);
if (ret)
mlx5dr_dbg(dmn, "Failed to set NIC RX/TX miss action, ret %d\n", ret);
return ret;
}
int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
struct mlx5dr_action *action)
{
struct mlx5dr_matcher *last_matcher = NULL;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *last_htbl;
int ret;
if (action && action->action_type != DR_ACTION_TYP_FT)
@ -16,56 +51,18 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
mlx5dr_domain_lock(tbl->dmn);
if (!list_empty(&tbl->matcher_list))
last_matcher = list_last_entry(&tbl->matcher_list,
struct mlx5dr_matcher,
matcher_list);
if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
if (last_matcher)
last_htbl = last_matcher->rx.e_anchor;
else
last_htbl = tbl->rx.s_anchor;
tbl->rx.default_icm_addr = action ?
action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
tbl->rx.nic_dmn->default_icm_addr;
info.type = CONNECT_MISS;
info.miss_icm_addr = tbl->rx.default_icm_addr;
ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
tbl->rx.nic_dmn,
last_htbl,
&info, true);
if (ret) {
mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret);
ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->rx, action);
if (ret)
goto out;
}
}
if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
if (last_matcher)
last_htbl = last_matcher->tx.e_anchor;
else
last_htbl = tbl->tx.s_anchor;
tbl->tx.default_icm_addr = action ?
action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr :
tbl->tx.nic_dmn->default_icm_addr;
info.type = CONNECT_MISS;
info.miss_icm_addr = tbl->tx.default_icm_addr;
ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
tbl->tx.nic_dmn,
last_htbl, &info, true);
if (ret) {
mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret);
ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->tx, action);
if (ret)
goto out;
}
}
/* Release old action */
@ -122,6 +119,8 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_htbl_connect_info info;
int ret;
INIT_LIST_HEAD(&nic_tbl->nic_matcher_list);
nic_tbl->default_icm_addr = nic_dmn->default_icm_addr;
nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@ -266,6 +265,8 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u
if (ret)
goto uninit_tbl;
INIT_LIST_HEAD(&tbl->dbg_node);
mlx5dr_dbg_tbl_add(tbl);
return tbl;
uninit_tbl:
@ -281,9 +282,10 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
{
int ret;
if (refcount_read(&tbl->refcount) > 1)
if (WARN_ON_ONCE(refcount_read(&tbl->refcount) > 1))
return -EBUSY;
mlx5dr_dbg_tbl_del(tbl);
ret = dr_table_destroy_sw_owned_tbl(tbl);
if (ret)
return ret;

View File

@ -11,6 +11,7 @@
#include "lib/mlx5.h"
#include "mlx5_ifc_dr.h"
#include "mlx5dr.h"
#include "dr_dbg.h"
#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
@ -104,7 +105,8 @@ enum mlx5dr_matcher_criteria {
DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
DR_MATCHER_CRITERIA_MAX = 1 << 6,
DR_MATCHER_CRITERIA_MISC5 = 1 << 6,
DR_MATCHER_CRITERIA_MAX = 1 << 7,
};
enum mlx5dr_action_type {
@ -440,6 +442,11 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@ -454,6 +461,10 @@ void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@ -494,57 +505,64 @@ struct mlx5dr_match_spec {
/* Incoming packet Ethertype - this is the Ethertype
* following the last VLAN tag of the packet
*/
u32 ethertype:16;
u32 smac_15_0:16; /* Source MAC address of incoming packet */
u32 ethertype:16;
u32 dmac_47_16; /* Destination MAC address of incoming packet */
/* VLAN ID of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
u32 first_vid:12;
/* CFI bit of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
u32 first_cfi:1;
u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
/* Priority of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
u32 first_prio:3;
u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
/* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
* Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
/* CFI bit of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
u32 tcp_flags:9;
u32 ip_version:4; /* IP version */
u32 frag:1; /* Packet is an IP fragment */
/* The first vlan in the packet is s-vlan (0x8a88).
* cvlan_tag and svlan_tag cannot be set together
u32 first_cfi:1;
/* VLAN ID of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
u32 svlan_tag:1;
/* The first vlan in the packet is c-vlan (0x8100).
* cvlan_tag and svlan_tag cannot be set together
*/
u32 cvlan_tag:1;
/* Explicit Congestion Notification derived from
* Traffic Class/TOS field of IPv6/v4
*/
u32 ip_ecn:2;
u32 first_vid:12;
u32 ip_protocol:8; /* IP protocol */
/* Differentiated Services Code Point derived from
* Traffic Class/TOS field of IPv6/v4
*/
u32 ip_dscp:6;
u32 ip_protocol:8; /* IP protocol */
/* Explicit Congestion Notification derived from
* Traffic Class/TOS field of IPv6/v4
*/
u32 ip_ecn:2;
/* The first vlan in the packet is c-vlan (0x8100).
* cvlan_tag and svlan_tag cannot be set together
*/
u32 cvlan_tag:1;
/* The first vlan in the packet is s-vlan (0x8a88).
* cvlan_tag and svlan_tag cannot be set together
*/
u32 svlan_tag:1;
u32 frag:1; /* Packet is an IP fragment */
u32 ip_version:4; /* IP version */
/* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
* Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
*/
u32 tcp_flags:9;
/* TCP source port.;tcp and udp sport/dport are mutually exclusive */
u32 tcp_sport:16;
/* TCP destination port.
* tcp and udp sport/dport are mutually exclusive
*/
u32 tcp_dport:16;
/* TCP source port.;tcp and udp sport/dport are mutually exclusive */
u32 tcp_sport:16;
u32 reserved_auto1:24;
u32 ttl_hoplimit:8;
u32 reserved:24;
/* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
u32 udp_dport:16;
/* UDP source port.;tcp and udp sport/dport are mutually exclusive */
u32 udp_sport:16;
/* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
u32 udp_dport:16;
/* IPv6 source address of incoming packets
* For IPv4 address use bits 31:0 (rest of the bits are reserved)
* This field should be qualified by an appropriate ethertype
@ -588,96 +606,114 @@ struct mlx5dr_match_spec {
};
struct mlx5dr_match_misc {
u32 source_sqn:24; /* Source SQN */
u32 source_vhca_port:4;
/* used with GRE, sequence number exist when gre_s_present == 1 */
u32 gre_s_present:1;
/* used with GRE, key exist when gre_k_present == 1 */
u32 gre_k_present:1;
u32 reserved_auto1:1;
/* used with GRE, checksum exist when gre_c_present == 1 */
u32 gre_c_present:1;
u32 reserved_auto1:1;
/* used with GRE, key exist when gre_k_present == 1 */
u32 gre_k_present:1;
/* used with GRE, sequence number exist when gre_s_present == 1 */
u32 gre_s_present:1;
u32 source_vhca_port:4;
u32 source_sqn:24; /* Source SQN */
u32 source_eswitch_owner_vhca_id:16;
/* Source port.;0xffff determines wire port */
u32 source_port:16;
u32 source_eswitch_owner_vhca_id:16;
/* VLAN ID of first VLAN tag the inner header of the incoming packet.
* Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
*/
u32 inner_second_vid:12;
/* CFI bit of first VLAN tag in the inner header of the incoming packet.
* Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
*/
u32 inner_second_cfi:1;
/* Priority of second VLAN tag in the inner header of the incoming packet.
* Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
*/
u32 inner_second_prio:3;
/* VLAN ID of first VLAN tag the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
u32 outer_second_vid:12;
/* CFI bit of first VLAN tag in the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
u32 outer_second_cfi:1;
/* Priority of second VLAN tag in the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
u32 outer_second_prio:3;
u32 gre_protocol:16; /* GRE Protocol (outer) */
u32 reserved_auto3:12;
/* The second vlan in the inner header of the packet is s-vlan (0x8a88).
* inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
/* CFI bit of first VLAN tag in the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
u32 inner_second_svlan_tag:1;
/* The second vlan in the outer header of the packet is s-vlan (0x8a88).
u32 outer_second_cfi:1;
/* VLAN ID of first VLAN tag the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
u32 outer_second_vid:12;
/* Priority of second VLAN tag in the inner header of the incoming packet.
* Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
*/
u32 inner_second_prio:3;
/* CFI bit of first VLAN tag in the inner header of the incoming packet.
* Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
*/
u32 inner_second_cfi:1;
/* VLAN ID of first VLAN tag the inner header of the incoming packet.
* Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
*/
u32 inner_second_vid:12;
u32 outer_second_cvlan_tag:1;
u32 inner_second_cvlan_tag:1;
/* The second vlan in the outer header of the packet is c-vlan (0x8100).
* outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
*/
u32 outer_second_svlan_tag:1;
/* The second vlan in the inner header of the packet is c-vlan (0x8100).
* inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
*/
u32 inner_second_cvlan_tag:1;
/* The second vlan in the outer header of the packet is c-vlan (0x8100).
u32 inner_second_svlan_tag:1;
/* The second vlan in the outer header of the packet is s-vlan (0x8a88).
* outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
*/
u32 outer_second_cvlan_tag:1;
u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
u32 reserved_auto2:12;
/* The second vlan in the inner header of the packet is s-vlan (0x8a88).
* inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
*/
u32 gre_protocol:16; /* GRE Protocol (outer) */
u32 gre_key_h:24; /* GRE Key[31:8] (outer) */
u32 reserved_auto4:8;
u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
u32 vxlan_vni:24; /* VXLAN VNI (outer) */
u32 geneve_oam:1; /* GENEVE OAM field (outer) */
u32 reserved_auto5:7;
u32 reserved_auto3:8;
u32 geneve_vni:24; /* GENEVE VNI field (outer) */
u32 reserved_auto4:6;
u32 geneve_tlv_option_0_exist:1;
u32 geneve_oam:1; /* GENEVE OAM field (outer) */
u32 reserved_auto5:12;
u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */
u32 reserved_auto6:12;
u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */
u32 reserved_auto7:12;
u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
u32 reserved_auto7:10;
u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */
u32 reserved_auto8:10;
u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
u32 reserved_auto8:8;
u32 bth_dst_qp:24; /* Destination QP in BTH header */
u32 reserved_auto9:8;
u8 reserved_auto10[20];
u32 reserved_auto9;
u32 outer_esp_spi;
u32 reserved_auto10[3];
};
struct mlx5dr_match_misc2 {
u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */
u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */
u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
u32 metadata_reg_c_7; /* metadata_reg_c_7 */
u32 metadata_reg_c_6; /* metadata_reg_c_6 */
u32 metadata_reg_c_5; /* metadata_reg_c_5 */
@ -687,7 +723,7 @@ struct mlx5dr_match_misc2 {
u32 metadata_reg_c_1; /* metadata_reg_c_1 */
u32 metadata_reg_c_0; /* metadata_reg_c_0 */
u32 metadata_reg_a; /* metadata_reg_a */
u8 reserved_auto2[12];
u32 reserved_auto1[3];
};
struct mlx5dr_match_misc3 {
@ -695,24 +731,34 @@ struct mlx5dr_match_misc3 {
u32 outer_tcp_seq_num;
u32 inner_tcp_ack_num;
u32 outer_tcp_ack_num;
u32 outer_vxlan_gpe_vni:24;
u32 reserved_auto1:8;
u32 reserved_auto2:16;
u32 outer_vxlan_gpe_flags:8;
u32 outer_vxlan_gpe_vni:24;
u32 outer_vxlan_gpe_next_protocol:8;
u32 outer_vxlan_gpe_flags:8;
u32 reserved_auto2:16;
u32 icmpv4_header_data;
u32 icmpv6_header_data;
u8 icmpv6_code;
u8 icmpv6_type;
u8 icmpv4_code;
u8 icmpv4_type;
u8 icmpv4_code;
u8 icmpv6_type;
u8 icmpv6_code;
u32 geneve_tlv_option_0_data;
u8 gtpu_msg_flags;
u8 gtpu_msg_type;
u32 gtpu_teid;
u8 gtpu_msg_type;
u8 gtpu_msg_flags;
u32 reserved_auto3:16;
u32 gtpu_dw_2;
u32 gtpu_first_ext_dw_0;
u32 gtpu_dw_0;
u32 reserved_auto4;
};
struct mlx5dr_match_misc4 {
@ -724,6 +770,18 @@ struct mlx5dr_match_misc4 {
u32 prog_sample_field_id_2;
u32 prog_sample_field_value_3;
u32 prog_sample_field_id_3;
u32 reserved_auto1[8];
};
struct mlx5dr_match_misc5 {
u32 macsec_tag_0;
u32 macsec_tag_1;
u32 macsec_tag_2;
u32 macsec_tag_3;
u32 tunnel_header_0;
u32 tunnel_header_1;
u32 tunnel_header_2;
u32 tunnel_header_3;
};
struct mlx5dr_match_param {
@ -733,6 +791,7 @@ struct mlx5dr_match_param {
struct mlx5dr_match_misc2 misc2;
struct mlx5dr_match_misc3 misc3;
struct mlx5dr_match_misc4 misc4;
struct mlx5dr_match_misc5 misc5;
};
#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
@ -789,6 +848,7 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_gtpu_teid;
u8 flex_parser_id_gtpu_dw_2;
u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 flex_parser_ok_bits_supp;
u8 max_ft_level;
u16 roce_min_src_udp;
u8 sw_format_ver;
@ -843,12 +903,15 @@ struct mlx5dr_domain {
struct mlx5dr_domain_info info;
struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
struct list_head dbg_tbl_list;
struct mlx5dr_dbg_dump_info dump_info;
};
struct mlx5dr_table_rx_tx {
struct mlx5dr_ste_htbl *s_anchor;
struct mlx5dr_domain_rx_tx *nic_dmn;
u64 default_icm_addr;
struct list_head nic_matcher_list;
};
struct mlx5dr_table {
@ -862,6 +925,7 @@ struct mlx5dr_table {
struct list_head matcher_list;
struct mlx5dr_action *miss_action;
refcount_t refcount;
struct list_head dbg_node;
};
struct mlx5dr_matcher_rx_tx {
@ -875,18 +939,21 @@ struct mlx5dr_matcher_rx_tx {
u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
u64 default_icm_addr;
struct mlx5dr_table_rx_tx *nic_tbl;
u32 prio;
struct list_head list_node;
u32 rules;
};
struct mlx5dr_matcher {
struct mlx5dr_table *tbl;
struct mlx5dr_matcher_rx_tx rx;
struct mlx5dr_matcher_rx_tx tx;
struct list_head matcher_list;
struct list_head list_node; /* Used for both matchers and dbg managing */
u32 prio;
struct mlx5dr_match_param mask;
u8 match_criteria;
refcount_t refcount;
struct mlx5dv_flow_matcher *dv_matcher;
struct list_head dbg_rule_list;
};
struct mlx5dr_ste_action_modify_field {
@ -958,6 +1025,11 @@ struct mlx5dr_action_flow_tag {
u32 flow_tag;
};
struct mlx5dr_rule_action_member {
struct mlx5dr_action *action;
struct list_head list;
};
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@ -998,6 +1070,7 @@ struct mlx5dr_rule {
struct mlx5dr_rule_rx_tx rx;
struct mlx5dr_rule_rx_tx tx;
struct list_head rule_actions_list;
struct list_head dbg_node;
u32 flow_source;
};
@ -1050,6 +1123,11 @@ static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
mlx5dr_domain_nic_unlock(&dmn->info.rx);
}
int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_matcher_rx_tx *nic_matcher);
int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_matcher_rx_tx *nic_matcher);
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
@ -194,6 +195,15 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
dest_attr->vport.vhca_id);
}
static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1,
dest_attr->vport.vhca_id);
}
static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
@ -218,7 +228,8 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai
static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
{
return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
@ -411,8 +422,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
tmp_action = create_vport_action(domain, dst);
tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ?
create_vport_action(domain, dst) :
create_uplink_action(domain, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;

View File

@ -447,6 +447,14 @@ struct mlx5_ifc_ste_flex_parser_1_bits {
u8 flex_parser_4[0x20];
};
struct mlx5_ifc_ste_flex_parser_ok_bits {
u8 flex_parser_3[0x20];
u8 flex_parser_2[0x20];
u8 flex_parsers_ok[0x8];
u8 reserved_at_48[0x18];
u8 flex_parser_0[0x20];
};
struct mlx5_ifc_ste_flex_parser_tnl_bits {
u8 flex_parser_tunneling_header_63_32[0x20];
@ -490,6 +498,14 @@ struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_ste_tunnel_header_bits {
u8 tunnel_header_0[0x20];
u8 tunnel_header_1[0x20];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_ste_general_purpose_bits {
u8 general_purpose_lookup_field[0x20];

View File

@ -1117,6 +1117,7 @@ enum {
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
};
enum {

View File

@ -372,7 +372,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 reserved_at_37[0x9];
u8 geneve_tlv_option_0_data[0x1];
u8 reserved_at_41[0x4];
u8 geneve_tlv_option_0_exist[0x1];
u8 reserved_at_42[0x3];
u8 outer_first_mpls_over_udp[0x4];
u8 outer_first_mpls_over_gre[0x4];
u8 inner_first_mpls[0x4];
@ -551,7 +552,8 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 bth_opcode[0x8];
u8 geneve_vni[0x18];
u8 reserved_at_d8[0x7];
u8 reserved_at_d8[0x6];
u8 geneve_tlv_option_0_exist[0x1];
u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc];
@ -670,6 +672,26 @@ struct mlx5_ifc_fte_match_set_misc4_bits {
u8 reserved_at_100[0x100];
};
struct mlx5_ifc_fte_match_set_misc5_bits {
u8 macsec_tag_0[0x20];
u8 macsec_tag_1[0x20];
u8 macsec_tag_2[0x20];
u8 macsec_tag_3[0x20];
u8 tunnel_header_0[0x20];
u8 tunnel_header_1[0x20];
u8 tunnel_header_2[0x20];
u8 tunnel_header_3[0x20];
u8 reserved_at_100[0x100];
};
struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20];
@ -811,7 +833,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 fdb_to_vport_reg_c_id[0x8];
u8 reserved_at_8[0xd];
u8 fdb_modify_header_fwd_to_table[0x1];
u8 reserved_at_16[0x1];
u8 fdb_ipv4_ttl_modify[0x1];
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
@ -1291,7 +1313,7 @@ enum {
enum {
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
@ -1839,7 +1861,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
u8 reserved_at_c00[0x400];
struct mlx5_ifc_fte_match_set_misc5_bits misc_parameters_5;
u8 reserved_at_e00[0x200];
};
enum {
@ -5977,6 +6001,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_5 = 0x6,
};
struct mlx5_ifc_query_flow_group_out_bits {

View File

@ -252,7 +252,7 @@ enum mlx5_ib_device_query_context_attrs {
MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT),
};
#define MLX5_IB_DW_MATCH_PARAM 0x90
#define MLX5_IB_DW_MATCH_PARAM 0xA0
struct mlx5_ib_match_params {
__u32 match_params[MLX5_IB_DW_MATCH_PARAM];