net/mlx5e: TC, Offload rewrite and mirror on tunnel over ovs internal port

To offload the encap rule when the tunnel IP is configured on an
openvswitch internal port, the driver needs to overwrite the vport
metadata in reg_c0 with the value assigned to the internal port, and
then forward packets to the root table so they are processed again by
the rules matching on the metadata for such an internal port.

When such a rule is combined with header rewrite and mirror, openvswitch
generates a rule like the following, because it resets the mirror after
packets are modified.
    in_port(enp8s0f0npf0sf1),..,
        actions:enp8s0f0npf0sf2,set(tunnel(...)),set(ipv4(...)),vxlan_sys_4789,enp8s0f0npf0sf2

The split_count was introduced before to support rewrite and mirror.
The driver splits the rule into two different hardware rules in order
to offload it. But that is not enough to offload the above complicated
rule, because of limitations in both the driver and the firmware.

To resolve this issue, the destination array is split again after the
destination indexed by split_count. An extra rule is added for the
leftover destinations (in the above example, enp8s0f0npf0sf2) and
inserted into the post_act table. An extra destination is then added to
the original rule to forward to the post_act table, so the extra mirror
is done there.

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20240808055927.2059700-4-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jianbo Liu 2024-08-08 08:59:19 +03:00 committed by Jakub Kicinski
parent 88c46f6103
commit 16bb8c6133
4 changed files with 112 additions and 0 deletions

View File

@ -109,6 +109,7 @@ struct mlx5e_tc_flow {
struct completion init_done;
struct completion del_hw_done;
struct mlx5_flow_attr *attr;
struct mlx5_flow_attr *extra_split_attr;
struct list_head attrs;
u32 chain_mapping;
};

View File

@ -1739,11 +1739,102 @@ has_encap_dests(struct mlx5_flow_attr *attr)
return false;
}
/* Decide whether @attr's destination list must be split one more time.
 *
 * Returns the index (split_count + 1) at which the extra split should
 * happen, or 0 when no extra split is required.
 *
 * NOTE(review): per the surrounding change, this targets encap rules over
 * an OVS internal port combined with header rewrite and mirror — confirm
 * against the commit description.
 */
static int
extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	/* Only the flow's original attr, first in the attrs list, is eligible. */
	if (attr != flow->attr || !list_is_first(&attr->list, &flow->attrs))
		return 0;

	/* A mirror split must exist, and more than one destination must
	 * remain after the split point (split_count == out_count - 1 means
	 * exactly one is left, which needs no further splitting).
	 */
	if (!esw_attr->split_count || esw_attr->split_count == esw_attr->out_count - 1)
		return 0;

	/* Require an internal-port destination whose first post-split dest
	 * carries the source-port-change chain flag.
	 */
	if (!esw_attr->dest_int_port)
		return 0;
	if (!(esw_attr->dests[esw_attr->split_count].flags &
	      MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE))
		return 0;

	return esw_attr->split_count + 1;
}
/* Split @attr's destination array at @split_count and move the leftover
 * destinations into a new rule inserted in the post_act table.
 *
 * The new attr (attr2) forwards to the tail destinations; the original
 * attr is truncated to @split_count destinations and pointed at the
 * post_act flow table via attr->extra_split_ft, so the extra mirror is
 * performed there.
 *
 * Returns 0 on success or a negative errno. On success, ownership of
 * attr2 is recorded in flow->extra_split_attr for later teardown.
 */
static int
extra_split_attr_dests(struct mlx5e_tc_flow *flow,
		       struct mlx5_flow_attr *attr, int split_count)
{
	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
	struct mlx5e_tc_flow_parse_attr *parse_attr, *parse_attr2;
	struct mlx5_esw_flow_attr *esw_attr, *esw_attr2;
	struct mlx5e_post_act_handle *handle;
	struct mlx5_flow_attr *attr2;
	int i, j, err;

	if (IS_ERR(post_act))
		return PTR_ERR(post_act);

	attr2 = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
	/* Size the allocation by the pointer being assigned (parse_attr2),
	 * not its same-typed sibling, so the sizeof stays correct if the
	 * declarations ever diverge.
	 */
	parse_attr2 = kvzalloc(sizeof(*parse_attr2), GFP_KERNEL);
	if (!attr2 || !parse_attr2) {
		err = -ENOMEM;
		goto err_free;
	}
	attr2->parse_attr = parse_attr2;

	handle = mlx5e_tc_post_act_add(post_act, attr2);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto err_free;
	}

	/* Mirror the in_rep/filter_dev context, then move the tail
	 * destinations [split_count, out_count) into the new attr.
	 */
	esw_attr = attr->esw_attr;
	esw_attr2 = attr2->esw_attr;
	esw_attr2->in_rep = esw_attr->in_rep;

	parse_attr = attr->parse_attr;
	parse_attr2->filter_dev = parse_attr->filter_dev;

	for (i = split_count, j = 0; i < esw_attr->out_count; i++, j++)
		esw_attr2->dests[j] = esw_attr->dests[i];
	esw_attr2->out_count = j;

	attr2->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	err = mlx5e_tc_post_act_offload(post_act, handle);
	if (err)
		goto err_post_act_offload;

	/* Point the original rule's mod header at the post_act handle so
	 * packets continue to the new rule after the first hardware rule.
	 */
	err = mlx5e_tc_post_act_set_handle(flow->priv->mdev, handle,
					   &parse_attr->mod_hdr_acts);
	if (err)
		goto err_post_act_set_handle;

	/* Truncate the original destination list and chain it to post_act. */
	esw_attr->out_count = split_count;
	attr->extra_split_ft = mlx5e_tc_post_act_get_ft(post_act);
	flow->extra_split_attr = attr2;

	attr2->post_act_handle = handle;

	return 0;

err_post_act_set_handle:
	mlx5e_tc_post_act_unoffload(post_act, handle);
err_post_act_offload:
	mlx5e_tc_post_act_del(post_act, handle);
err_free:
	kvfree(parse_attr2);
	kfree(attr2);
	return err;
}
static int
post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
int extra_split;
bool vf_tun;
int err = 0;
@ -1757,6 +1848,13 @@ post_process_attr(struct mlx5e_tc_flow *flow,
goto err_out;
}
extra_split = extra_split_attr_dests_needed(flow, attr);
if (extra_split > 0) {
err = extra_split_attr_dests(flow, attr, extra_split);
if (err)
goto err_out;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err)
@ -1971,6 +2069,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
free_flow_post_acts(flow);
if (flow->extra_split_attr) {
mlx5_free_flow_attr_actions(flow, flow->extra_split_attr);
kvfree(flow->extra_split_attr->parse_attr);
kfree(flow->extra_split_attr);
}
mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->esw_attr->rx_tun_attr);

View File

@ -86,6 +86,7 @@ struct mlx5_flow_attr {
u32 dest_chain;
struct mlx5_flow_table *ft;
struct mlx5_flow_table *dest_ft;
struct mlx5_flow_table *extra_split_ft;
u8 inner_match_level;
u8 outer_match_level;
u8 tun_ip_version;

View File

@ -613,6 +613,13 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
}
}
if (attr->extra_split_ft) {
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = attr->extra_split_ft;
(*i)++;
}
out:
return err;
}