2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-25 21:54:06 +08:00

net/mlx5: E-Switch, Refactor eswitch ingress acl codes

Restructure the eswitch ingress acl code into the eswitch directory
and different files:
. Acl ingress helper functions to acl_helper.c/h
. Acl ingress functions used in offloads mode to acl_ingress_ofld.c
. Acl ingress functions used in legacy mode to acl_ingress_lgy.c

This patch does not change any functionality.

Signed-off-by: Vu Pham <vuhuong@mellanox.com>
This commit is contained in:
Vu Pham 2020-03-27 23:12:22 -07:00 committed by Saeed Mahameed
parent ea651a86d4
commit 07bab95026
10 changed files with 619 additions and 583 deletions

View File

@ -47,7 +47,8 @@ mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o

View File

@ -140,3 +140,21 @@ void esw_acl_egress_table_destroy(struct mlx5_vport *vport)
mlx5_destroy_flow_table(vport->egress.acl);
vport->egress.acl = NULL;
}
/* Remove the vport's ingress ACL flow table, if one exists, and clear
 * the cached pointer so subsequent calls are no-ops.
 */
void esw_acl_ingress_table_destroy(struct mlx5_vport *vport)
{
	struct mlx5_flow_table *acl = vport->ingress.acl;

	if (!acl)
		return;

	vport->ingress.acl = NULL;
	mlx5_destroy_flow_table(acl);
}
/* Delete the vport's ingress "allow" rule, if one was installed, and
 * clear the cached pointer so subsequent calls are no-ops.
 */
void esw_acl_ingress_allow_rule_destroy(struct mlx5_vport *vport)
{
	struct mlx5_flow_handle *allow_rule = vport->ingress.allow_rule;

	if (!allow_rule)
		return;

	vport->ingress.allow_rule = NULL;
	mlx5_del_flow_rules(allow_rule);
}

View File

@ -19,4 +19,8 @@ void esw_acl_egress_vlan_destroy(struct mlx5_vport *vport);
int esw_acl_egress_vlan_grp_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_vlan_grp_destroy(struct mlx5_vport *vport);
/* Ingress acl helper functions */
void esw_acl_ingress_table_destroy(struct mlx5_vport *vport);
void esw_acl_ingress_allow_rule_destroy(struct mlx5_vport *vport);
#endif /* __MLX5_ESWITCH_ACL_HELPER_H__ */

View File

@ -0,0 +1,279 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "lgcy.h"
/* Tear down the legacy ingress rules: the per-vport drop rule (if any)
 * followed by the shared "allow" rule handled by the ACL helpers.
 */
static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport)
{
	struct mlx5_flow_handle *drop_rule = vport->ingress.legacy.drop_rule;

	if (drop_rule) {
		vport->ingress.legacy.drop_rule = NULL;
		mlx5_del_flow_rules(drop_rule);
	}
	esw_acl_ingress_allow_rule_destroy(vport);
}
/* Create the four flow groups that partition the legacy ingress ACL table:
 *   index 0: untagged + spoofchk  (match cvlan_tag + smac)
 *   index 1: untagged only        (match cvlan_tag)
 *   index 2: spoofchk only        (match smac)
 *   index 3: catch-all drop group (no match criteria)
 * Each group holds exactly one flow-table entry. On failure, previously
 * created groups are unwound in reverse order via the goto ladder.
 * Returns 0 on success or a negative errno.
 */
static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* Group 0: untagged traffic from the vport's own MAC. */
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto spoof_err;
	}
	vport->ingress.legacy.allow_untagged_spoofchk_grp = g;

	/* Group 1: untagged traffic regardless of source MAC. The memset
	 * clears flow_group_in; match_criteria still points into it, so the
	 * MLX5_SET_TO_ONES below repopulates only the cvlan_tag criterion.
	 */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto untagged_err;
	}
	vport->ingress.legacy.allow_untagged_only_grp = g;

	/* Group 2: any traffic from the vport's own MAC (spoofchk only). */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto allow_spoof_err;
	}
	vport->ingress.legacy.allow_spoofchk_only_grp = g;

	/* Group 3: no match criteria - catches everything else for the
	 * drop rule.
	 */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
			 vport->vport, err);
		goto drop_err;
	}
	vport->ingress.legacy.drop_grp = g;
	kvfree(flow_group_in);
	return 0;

	/* Unwind in reverse creation order. */
drop_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
allow_spoof_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
untagged_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
spoof_err:
	kvfree(flow_group_in);
	return err;
}
/* Destroy every legacy ingress ACL flow group that is currently
 * allocated. The iteration order matches the original open-coded
 * destruction sequence.
 */
static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
{
	struct mlx5_flow_group **groups[] = {
		&vport->ingress.legacy.allow_spoofchk_only_grp,
		&vport->ingress.legacy.allow_untagged_only_grp,
		&vport->ingress.legacy.allow_untagged_spoofchk_grp,
		&vport->ingress.legacy.drop_grp,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(groups); i++) {
		if (!*groups[i])
			continue;
		mlx5_destroy_flow_group(*groups[i]);
		*groups[i] = NULL;
	}
}
int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_destination drop_ctr_dst = {};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec = NULL;
struct mlx5_fc *counter = NULL;
/* The ingress acl table contains 4 groups
* (2 active rules at the same time -
* 1 allow rule from one of the first 3 groups.
* 1 drop rule from the last group):
* 1)Allow untagged traffic with smac=original mac.
* 2)Allow untagged traffic.
* 3)Allow traffic with smac=original mac.
* 4)Drop all other traffic.
*/
int table_size = 4;
int dest_num = 0;
int err = 0;
u8 *smac_v;
esw_acl_ingress_lgcy_rules_destroy(vport);
if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(counter))
esw_warn(esw->dev,
"vport[%d] configure ingress drop rule counter failed\n",
vport->vport);
vport->ingress.legacy.drop_counter = counter;
}
if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
esw_acl_ingress_lgcy_cleanup(esw, vport);
return 0;
}
if (!vport->ingress.acl) {
vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
table_size);
if (IS_ERR_OR_NULL(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL;
return err;
}
err = esw_acl_ingress_lgcy_groups_create(esw, vport);
if (err)
goto out;
}
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
goto out;
}
if (vport->info.vlan || vport->info.qos)
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
if (vport->info.spoofchk) {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.smac_15_0);
smac_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac_v, vport->info.mac);
}
/* Create ingress allow rule */
memset(spec, 0, sizeof(*spec));
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, NULL, 0);
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
esw_warn(esw->dev,
"vport[%d] configure ingress allow rule, err(%d)\n",
vport->vport, err);
vport->ingress.allow_rule = NULL;
goto out;
}
memset(&flow_act, 0, sizeof(flow_act));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach drop flow counter */
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
drop_ctr_dst.counter_id = mlx5_fc_id(counter);
dst = &drop_ctr_dst;
dest_num++;
}
vport->ingress.legacy.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, NULL,
&flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.legacy.drop_rule)) {
err = PTR_ERR(vport->ingress.legacy.drop_rule);
esw_warn(esw->dev,
"vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
vport->ingress.legacy.drop_rule = NULL;
goto out;
}
kvfree(spec);
return 0;
out:
esw_acl_ingress_lgcy_cleanup(esw, vport);
kvfree(spec);
return err;
}
/* Tear down the legacy ingress ACL (rules, then groups, then table) and
 * release the drop-counter, if any. Safe to call when nothing is set up.
 */
void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->ingress.acl)) {
		esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

		esw_acl_ingress_lgcy_rules_destroy(vport);
		esw_acl_ingress_lgcy_groups_destroy(vport);
		esw_acl_ingress_table_destroy(vport);
	}

	if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_counter)) {
		mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
		vport->ingress.legacy.drop_counter = NULL;
	}
}

View File

@ -0,0 +1,293 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "ofld.h"
static bool
esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
{
return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
mlx5_eswitch_is_vf_vport(esw, vport->vport));
}
/* Install the single prio-tag FTE in the vport's ingress ACL: untagged
 * packets get a zero-VID "priority tag" VLAN pushed and, when the
 * metadata rule exists, the source-port metadata modify header applied.
 * The created rule is stored in vport->ingress.allow_rule.
 * Returns 0 on success or a negative errno.
 */
static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only 1 FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 * required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	/* VID 0 / prio 0: a bare priority tag, carrying no VLAN membership. */
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
							&flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}
/* Allocate a modify-header action that writes this vport's source-port
 * metadata into metadata register C0, and install a match-all FTE that
 * applies it (plus ALLOW) to every ingress packet. On rule-creation
 * failure the modify header is freed again. Returns 0 or negative errno.
 */
static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	/* Strip the in-register offset from the match value; the set action
	 * re-applies it through its own offset/length fields below.
	 */
	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	/* NULL spec: the rule matches all packets in the table's group. */
	vport->ingress.offloads.modify_metadata_rule =
				mlx5_add_flow_rules(vport->ingress.acl,
						    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}
/* Remove the metadata rewrite rule and free its modify header. The rule
 * must be deleted before the header it references is deallocated.
 */
static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
						 struct mlx5_vport *vport)
{
	struct mlx5_flow_handle *meta_rule = vport->ingress.offloads.modify_metadata_rule;

	if (!meta_rule)
		return;

	mlx5_del_flow_rules(meta_rule);
	mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
	vport->ingress.offloads.modify_metadata_rule = NULL;
}
/* Create the offloads-mode ingress rules that currently apply:
 * the metadata rewrite rule (when source-port match metadata is enabled)
 * and the prio-tag rule (when required for this vport). If the prio-tag
 * rule fails, the metadata rule is rolled back.
 */
static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	int err = 0;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_acl_ingress_mod_metadata_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) create ingress modify metadata, err(%d)\n",
				 vport->vport, err);
			return err;
		}
	}

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_acl_ingress_prio_tag_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) create ingress prio tag rule, err(%d)\n",
				 vport->vport, err);
			/* Roll back the metadata rule (no-op if absent). */
			esw_acl_ingress_mod_metadata_destroy(esw, vport);
		}
	}

	return err;
}
/* Remove all offloads-mode ingress rules. The allow/prio-tag rule is
 * deleted first because it may reference the modify header that
 * esw_acl_ingress_mod_metadata_destroy() deallocates.
 */
static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	esw_acl_ingress_allow_rule_destroy(vport);
	esw_acl_ingress_mod_metadata_destroy(esw, vport);
}
/* Create up to two flow groups in the offloads-mode ingress ACL:
 * one matching untagged packets (only when prio-tag is enabled) and one
 * match-all group for the metadata rewrite rule (only when source-port
 * match metadata is enabled). flow_index assigns consecutive FTE slots
 * to whichever groups are actually created.
 * Returns 0 on success or a negative errno, unwinding on failure.
 */
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		/* This group is to hold FTE to match untagged packets when prio_tag
		 * is enabled.
		 */
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no match to add metadata for
		 * tagged packets if prio-tag is enabled, or for all untagged
		 * traffic in case prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

	/* Unwind: destroy the prio-tag group if it was created above. */
metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	kvfree(flow_group_in);
	return ret;
}
/* Destroy whichever offloads-mode ingress flow groups exist, in the
 * same order as the original open-coded sequence.
 */
static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
{
	struct mlx5_flow_group **groups[] = {
		&vport->ingress.offloads.metadata_allmatch_grp,
		&vport->ingress.offloads.metadata_prio_tag_grp,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(groups); i++) {
		if (!*groups[i])
			continue;
		mlx5_destroy_flow_group(*groups[i]);
		*groups[i] = NULL;
	}
}
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int num_ftes = 0;
int err;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
!esw_acl_ingress_prio_tag_enabled(esw, vport))
return 0;
esw_acl_ingress_allow_rule_destroy(vport);
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
num_ftes++;
if (esw_acl_ingress_prio_tag_enabled(esw, vport))
num_ftes++;
vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
num_ftes);
if (IS_ERR_OR_NULL(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL;
return err;
}
err = esw_acl_ingress_ofld_groups_create(esw, vport);
if (err)
goto group_err;
esw_debug(esw->dev,
"vport[%d] configure ingress rules\n", vport->vport);
err = esw_acl_ingress_ofld_rules_create(esw, vport);
if (err)
goto rules_err;
return 0;
rules_err:
esw_acl_ingress_ofld_groups_destroy(vport);
group_err:
esw_acl_ingress_table_destroy(vport);
return err;
}
/* Tear down the offloads-mode ingress ACL in dependency order:
 * rules first (they reference the groups/modify header), then groups,
 * then the flow table itself.
 */
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	esw_acl_ingress_ofld_rules_destroy(esw, vport);
	esw_acl_ingress_ofld_groups_destroy(vport);
	esw_acl_ingress_table_destroy(vport);
}

View File

@ -10,4 +10,8 @@
int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
/* Eswitch acl ingress external APIs */
int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
#endif /* __MLX5_ESWITCH_ACL_LGCY_H__ */

View File

@ -10,4 +10,8 @@
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
/* Eswitch acl ingress external APIs */
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
#endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */

View File

@ -937,301 +937,6 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
/* Create the four flow groups of the legacy ingress ACL table:
 *   index 0: untagged + spoofchk  (match cvlan_tag + smac)
 *   index 1: untagged only        (match cvlan_tag)
 *   index 2: spoofchk only        (match smac)
 *   index 3: catch-all drop group (no match criteria)
 * Each group holds one flow-table entry. On failure, previously created
 * groups are unwound in reverse order via the goto ladder.
 * Returns 0 on success or a negative errno.
 */
static int
esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* Group 0: untagged traffic from the vport's own MAC. */
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto spoof_err;
	}
	vport->ingress.legacy.allow_untagged_spoofchk_grp = g;

	/* Group 1: untagged traffic regardless of source MAC. The memset
	 * clears flow_group_in; match_criteria still points into it, so the
	 * MLX5_SET_TO_ONES below repopulates only the cvlan_tag criterion.
	 */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto untagged_err;
	}
	vport->ingress.legacy.allow_untagged_only_grp = g;

	/* Group 2: any traffic from the vport's own MAC (spoofchk only). */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto allow_spoof_err;
	}
	vport->ingress.legacy.allow_spoofchk_only_grp = g;

	/* Group 3: no match criteria - catches everything else for the
	 * drop rule.
	 */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
			 vport->vport, err);
		goto drop_err;
	}
	vport->ingress.legacy.drop_grp = g;
	kvfree(flow_group_in);
	return 0;

	/* Unwind in reverse creation order. */
drop_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
allow_spoof_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
untagged_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
spoof_err:
	kvfree(flow_group_in);
	return err;
}
/* Create the vport's ingress ACL flow table with @table_size entries in
 * the per-vport ESW ingress namespace and store it in vport->ingress.acl.
 * Returns 0 on success, -EOPNOTSUPP when the device lacks ingress ACL
 * table support or the namespace is unavailable, or the table-creation
 * error.
 */
int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int table_size)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	int vport_index;
	int err;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	/* The ACL namespace is looked up by vport index, not vport number. */
	vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						    vport_index);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
			 vport->vport);
		return -EOPNOTSUPP;
	}

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
			 vport->vport, err);
		return err;
	}
	vport->ingress.acl = acl;
	return 0;
}
/* Destroy the vport's ingress ACL flow table, if any, and clear the
 * cached pointer so subsequent calls are no-ops.
 */
void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
{
	struct mlx5_flow_table *acl = vport->ingress.acl;

	if (!acl)
		return;

	vport->ingress.acl = NULL;
	mlx5_destroy_flow_table(acl);
}
/* Delete the legacy drop rule and the allow rule from the vport's
 * ingress ACL, clearing each cached handle. Safe to call repeatedly.
 */
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	struct mlx5_flow_handle *rule;

	rule = vport->ingress.legacy.drop_rule;
	if (rule) {
		vport->ingress.legacy.drop_rule = NULL;
		mlx5_del_flow_rules(rule);
	}

	rule = vport->ingress.allow_rule;
	if (rule) {
		vport->ingress.allow_rule = NULL;
		mlx5_del_flow_rules(rule);
	}
}
/* Fully dismantle the legacy ingress ACL: rules first (they live in the
 * groups), then every allocated flow group, then the table itself.
 * No-op when no ACL table exists.
 */
static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
						 struct mlx5_vport *vport)
{
	if (!vport->ingress.acl)
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	if (vport->ingress.legacy.allow_spoofchk_only_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
	if (vport->ingress.legacy.allow_untagged_only_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
	if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
	if (vport->ingress.legacy.drop_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
		vport->ingress.legacy.drop_grp = NULL;
	}
	esw_vport_destroy_ingress_acl_table(vport);
}
/* (Re)configure the legacy ingress ACL for @vport from its current
 * vlan/qos/spoofchk settings: an allow rule matching the permitted
 * traffic and a catch-all drop rule (optionally counted). Tears the ACL
 * down entirely when no feature is enabled. Returns 0 or negative errno;
 * any partial setup is cleaned up on failure.
 */
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec = NULL;
	int dest_num = 0;
	int err = 0;
	u8 *smac_v;

	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 * 1 allow rule from one of the first 3 groups.
	 * 1 drop rule from the last group):
	 * 1)Allow untagged traffic with smac=original mac.
	 * 2)Allow untagged traffic.
	 * 3)Allow traffic with smac=original mac.
	 * 4)Drop all other traffic.
	 */
	int table_size = 4;

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_vport_disable_legacy_ingress_acl(esw, vport);
		return 0;
	}

	if (!vport->ingress.acl) {
		err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
		if (err) {
			/* Fix: arguments were swapped relative to the format
			 * string ("vport[%d] ... err (%d)" was printed with
			 * err first and vport number second).
			 */
			esw_warn(esw->dev,
				 "vport[%d] enable ingress acl err (%d)\n",
				 vport->vport, err);
			return err;
		}

		err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
		if (err)
			goto out;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	if (vport->info.vlan || vport->info.qos)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);

	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	/* Allow rule: matches only the traffic permitted by the criteria
	 * built above; everything else falls through to the drop rule.
	 */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	/* Attach drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->ingress.legacy.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, NULL,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->ingress.legacy.drop_rule)) {
		err = PTR_ERR(vport->ingress.legacy.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress drop rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.legacy.drop_rule = NULL;
		goto out;
	}
	kvfree(spec);
	return 0;

out:
	esw_vport_disable_legacy_ingress_acl(esw, vport);
	kvfree(spec);
	return err;
}
static bool element_type_supported(struct mlx5_eswitch *esw, int type)
{
const struct mlx5_core_dev *dev = esw->dev;
@ -1443,17 +1148,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
if (mlx5_esw_is_manager_vport(esw, vport->vport))
return 0;
if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(vport->ingress.legacy.drop_counter)) {
esw_warn(esw->dev,
"vport[%d] configure ingress drop rule counter failed\n",
vport->vport);
vport->ingress.legacy.drop_counter = NULL;
}
}
ret = esw_vport_ingress_config(esw, vport);
ret = esw_acl_ingress_lgcy_setup(esw, vport);
if (ret)
goto ingress_err;
@ -1464,10 +1159,8 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
return 0;
egress_err:
esw_vport_disable_legacy_ingress_acl(esw, vport);
esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
vport->ingress.legacy.drop_counter = NULL;
return ret;
}
@ -1488,10 +1181,7 @@ static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
return;
esw_acl_egress_lgcy_cleanup(esw, vport);
esw_vport_disable_legacy_ingress_acl(esw, vport);
mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
vport->ingress.legacy.drop_counter = NULL;
esw_acl_ingress_lgcy_cleanup(esw, vport);
}
static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
@ -2123,7 +1813,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
ether_addr_copy(evport->info.mac, mac);
evport->info.node_guid = node_guid;
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
err = esw_vport_ingress_config(esw, evport);
err = esw_acl_ingress_lgcy_setup(esw, evport);
unlock:
mutex_unlock(&esw->state_lock);
@ -2205,7 +1895,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
evport->info.vlan = vlan;
evport->info.qos = qos;
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
err = esw_acl_ingress_lgcy_setup(esw, evport);
if (err)
return err;
err = esw_acl_egress_lgcy_setup(esw, evport);
@ -2250,7 +1940,7 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
"Spoofchk in set while MAC is invalid, vport(%d)\n",
evport->vport);
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
err = esw_vport_ingress_config(esw, evport);
err = esw_acl_ingress_lgcy_setup(esw, evport);
if (err)
evport->info.spoofchk = pschk;
mutex_unlock(&esw->state_lock);

View File

@ -285,12 +285,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
int table_size);
void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
u32 rate_mbps);

View File

@ -235,13 +235,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
{
return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
mlx5_eswitch_is_vf_vport(esw, vport->vport));
}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@ -1852,248 +1845,6 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
/* Install the single "untagged -> push prio-tag VLAN" FTE into the vport's
 * ingress ACL.  If a metadata modify-header was already set up for this
 * vport, the same rule also applies that rewrite.
 *
 * Returns 0 on success or a negative errno from rule creation; on failure
 * vport->ingress.allow_rule is left NULL.
 */
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only 1 FTEs:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 * required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	/* VLAN id 0 / prio 0: the pushed tag carries no VLAN membership,
	 * only turns the frame into a (prio-)tagged one.
	 */
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	/* Piggy-back the source-port metadata rewrite onto this rule when
	 * one was installed, so untagged packets get marked as well.
	 */
	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		/* Clear the ERR_PTR so later cleanup does not free garbage. */
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}
/* Allocate a modify-header action that writes this vport's source-port
 * metadata into REG_C_0 and install a match-all ingress rule applying it,
 * so packets from this vport can later be identified by metadata match.
 *
 * Returns 0 on success or a negative errno; on rule-creation failure the
 * modify header is deallocated and the rule pointer is left NULL.
 */
static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	/* Per-vport metadata value, shifted down to fit the REG_C_0
	 * bit-field programmed below (offset/length).
	 */
	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	/* NULL spec => match-all: every ingress packet gets the metadata. */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	vport->ingress.offloads.modify_metadata_rule =
				mlx5_add_flow_rules(vport->ingress.acl,
						    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		/* Undo the header allocation; clear the ERR_PTR so cleanup
		 * paths see "no rule installed".
		 */
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}
/* Remove the metadata-set ingress rule and free its modify header.
 * Safe to call when nothing was installed; clears the rule pointer so the
 * teardown is idempotent.
 */
static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						      struct mlx5_vport *vport)
{
	if (!vport->ingress.offloads.modify_metadata_rule)
		return;

	mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
	mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
	vport->ingress.offloads.modify_metadata_rule = NULL;
}
/* Create the flow groups of the vport ingress ACL table, in index order:
 *   - an optional one-FTE group matching on cvlan_tag (untagged packets),
 *     when prio-tag mode is required for this vport;
 *   - an optional one-FTE match-all group for the metadata-set rule, when
 *     vport match-metadata is enabled.
 *
 * Returns 0 on success or a negative errno; on failure any group created
 * by this call is destroyed again.
 */
static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		/* This group is to hold FTE to match untagged packets when prio_tag
		 * is enabled.
		 */
		memset(flow_group_in, 0, inlen);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		/* Next group starts after the prio-tag FTE. */
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no matches for add metadata for
		 * tagged packets, if prio-tag is enabled (as a fallthrough),
		 * or all traffic in case prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	/* Tear down the prio-tag group if it was created above. */
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	kvfree(flow_group_in);
	return ret;
}
/* Destroy the ingress ACL flow groups created by
 * esw_vport_create_ingress_acl_group().  Either group may be absent
 * (feature-dependent), so each pointer is checked and then cleared,
 * keeping the teardown idempotent.
 */
static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
}
/* Build the complete offloads-mode ingress ACL for @vport:
 * table -> flow groups -> optional metadata-set rule -> optional
 * prio-tag rule.  No-op when neither metadata matching nor prio-tag is
 * needed for this vport.
 *
 * Returns 0 on success or a negative errno; on failure everything created
 * so far is unwound in reverse order via the goto labels.
 */
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_check_ingress_prio_tag_enabled(esw, vport))
		return 0;

	/* Drop any previously installed rules before (re)building. */
	esw_vport_cleanup_ingress_rules(esw, vport);

	/* Size the table for exactly the FTEs we are about to install. */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (esw_check_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	err = esw_vport_create_ingress_acl_group(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto metadata_err;
	}

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto prio_tag_err;
	}
	return 0;

	/* Error unwind: reverse order of creation. */
prio_tag_err:
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
metadata_err:
	esw_vport_destroy_ingress_acl_group(vport);
group_err:
	esw_vport_destroy_ingress_acl_table(vport);
	return err;
}
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
@ -2132,19 +1883,20 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
{
int err;
err = esw_vport_ingress_config(esw, vport);
err = esw_acl_ingress_ofld_setup(esw, vport);
if (err)
return err;
if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
err = esw_acl_egress_ofld_setup(esw, vport);
if (err) {
esw_vport_cleanup_ingress_rules(esw, vport);
esw_vport_del_ingress_acl_modify_metadata(esw, vport);
esw_vport_destroy_ingress_acl_group(vport);
esw_vport_destroy_ingress_acl_table(vport);
}
if (err)
goto egress_err;
}
return 0;
egress_err:
esw_acl_ingress_ofld_cleanup(esw, vport);
return err;
}
@ -2153,10 +1905,7 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
esw_acl_egress_ofld_cleanup(vport);
esw_vport_cleanup_ingress_rules(esw, vport);
esw_vport_del_ingress_acl_modify_metadata(esw, vport);
esw_vport_destroy_ingress_acl_group(vport);
esw_vport_destroy_ingress_acl_table(vport);
esw_acl_ingress_ofld_cleanup(esw, vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)