flow_offload: add skip_hw and skip_sw to control whether the action is offloaded

We add skip_hw and skip_sw flags so the user can control whether the
action is offloaded to hardware.

We also add in_hw_count to indicate to the user whether the action is
offloaded to any hardware.

Signed-off-by: Baowen Zheng <baowen.zheng@corigine.com>
Signed-off-by: Simon Horman <simon.horman@corigine.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Baowen Zheng 2021-12-17 19:16:23 +01:00 committed by David S. Miller
parent 8cbfe939ab
commit 7adc576512
3 changed files with 84 additions and 9 deletions

View File

@ -44,6 +44,7 @@ struct tc_action {
u8 hw_stats;
u8 used_hw_stats;
bool used_hw_stats_valid;
u32 in_hw_count;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt

View File

@ -19,13 +19,16 @@ enum {
TCA_ACT_FLAGS,
TCA_ACT_HW_STATS,
TCA_ACT_USED_HW_STATS,
TCA_ACT_IN_HW_COUNT,
__TCA_ACT_MAX
};
/* See other TCA_ACT_FLAGS_ * flags in include/net/act_api.h. */
#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
* actions stats.
*/
#define TCA_ACT_FLAGS_NO_PERCPU_STATS (1 << 0) /* Don't use percpu allocator for
* actions stats.
*/
#define TCA_ACT_FLAGS_SKIP_HW (1 << 1) /* don't offload action to HW */
#define TCA_ACT_FLAGS_SKIP_SW (1 << 2) /* don't use action in SW */
/* tca HW stats type
* When user does not pass the attribute, he does not care.

View File

@ -131,6 +131,12 @@ static void free_tcf(struct tc_action *p)
kfree(p);
}
/* Record how many hardware devices currently offload @act.
 * A non-zero count is what tc_act_in_hw() later tests to decide whether
 * the action is considered "in hardware".
 */
static void offload_action_hw_count_set(struct tc_action *act,
u32 hw_count)
{
act->in_hw_count = hw_count;
}
static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
{
if (is_tcf_pedit(act))
@ -139,6 +145,29 @@ static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
return 1;
}
/* Return true when the user requested that this action not be offloaded
 * to hardware (TCA_ACT_FLAGS_SKIP_HW set in the action flags).
 *
 * The `? true : false` ternary is redundant in C: conversion of a nonzero
 * value to bool already yields true, so test the bit directly.
 */
static bool tc_act_skip_hw(u32 flags)
{
	return flags & TCA_ACT_FLAGS_SKIP_HW;
}
/* Return true when the user requested that this action not be used in
 * the software datapath (TCA_ACT_FLAGS_SKIP_SW set in the action flags).
 *
 * As with tc_act_skip_hw(), the `? true : false` ternary is redundant:
 * converting a nonzero value to bool already yields true.
 */
static bool tc_act_skip_sw(u32 flags)
{
	return flags & TCA_ACT_FLAGS_SKIP_SW;
}
/* An action counts as "in hardware" once at least one device offloads it,
 * i.e. its in_hw_count is non-zero.
 */
static bool tc_act_in_hw(struct tc_action *act)
{
	return act->in_hw_count != 0;
}
/* SKIP_HW and SKIP_SW are mutually exclusive flags: an action must run
 * somewhere, so asking to skip both datapaths is rejected.  Any other
 * combination (neither set, or exactly one set) is valid.
 */
static bool tc_act_flags_valid(u32 flags)
{
	u32 exclusive = TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;

	return (flags & exclusive) != exclusive;
}
static int offload_action_init(struct flow_offload_action *fl_action,
struct tc_action *act,
enum offload_act_command cmd,
@ -155,6 +184,7 @@ static int offload_action_init(struct flow_offload_action *fl_action,
}
static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
u32 *hw_count,
struct netlink_ext_ack *extack)
{
int err;
@ -164,6 +194,9 @@ static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
if (err < 0)
return err;
if (hw_count)
*hw_count = err;
return 0;
}
@ -171,12 +204,17 @@ static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
static int tcf_action_offload_add(struct tc_action *action,
struct netlink_ext_ack *extack)
{
bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
[0] = action,
};
struct flow_offload_action *fl_action;
u32 in_hw_count = 0;
int num, err = 0;
if (tc_act_skip_hw(action->tcfa_flags))
return 0;
num = tcf_offload_act_num_actions_single(action);
fl_action = offload_action_alloc(num);
if (!fl_action)
@ -193,7 +231,13 @@ static int tcf_action_offload_add(struct tc_action *action,
goto fl_err;
}
err = tcf_action_offload_cmd(fl_action, extack);
err = tcf_action_offload_cmd(fl_action, &in_hw_count, extack);
if (!err)
offload_action_hw_count_set(action, in_hw_count);
if (skip_sw && !tc_act_in_hw(action))
err = -EINVAL;
tc_cleanup_offload_action(&fl_action->action);
fl_err:
@ -205,13 +249,24 @@ fl_err:
/* Remove the hardware offload for @action, if any.
 * Returns 0 when the action was never offloaded or the delete succeeded
 * on every device that held it; a negative errno otherwise.
 *
 * NOTE(review): this span is a flattened unified diff — both the old
 * tcf_action_offload_cmd() call (two-argument form) and its replacement
 * (with the hw_count out-parameter) appear below.  It is not valid C as
 * shown; consult the original patch for the real before/after bodies.
 */
static int tcf_action_offload_del(struct tc_action *action)
{
struct flow_offload_action fl_act = {};
u32 in_hw_count = 0;
int err = 0;
/* Nothing to do if no device ever accepted the offload. */
if (!tc_act_in_hw(action))
return 0;
err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
if (err)
return err;
/* Old call, removed by this patch: */
return tcf_action_offload_cmd(&fl_act, NULL);
/* New call, added by this patch — also reports how many devices deleted: */
err = tcf_action_offload_cmd(&fl_act, &in_hw_count, NULL);
if (err)
return err;
/* Every device that offloaded the action must have deleted it. */
if (action->in_hw_count != in_hw_count)
return -EINVAL;
return 0;
}
static void tcf_action_cleanup(struct tc_action *p)
@ -821,6 +876,9 @@ restart_act_graph:
jmp_prgcnt -= 1;
continue;
}
if (tc_act_skip_sw(a->tcfa_flags))
continue;
repeat:
ret = a->ops->act(skb, a, res);
if (ret == TC_ACT_REPEAT)
@ -926,6 +984,9 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
a->tcfa_flags, a->tcfa_flags))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
@ -1005,7 +1066,9 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
[TCA_ACT_COOKIE] = { .type = NLA_BINARY,
.len = TC_COOKIE_MAX_SIZE },
[TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
[TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
[TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
TCA_ACT_FLAGS_SKIP_HW |
TCA_ACT_FLAGS_SKIP_SW),
[TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};
@ -1118,8 +1181,13 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
}
}
hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
if (tb[TCA_ACT_FLAGS])
if (tb[TCA_ACT_FLAGS]) {
userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
if (!tc_act_flags_valid(userflags.value)) {
err = -EINVAL;
goto err_out;
}
}
err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
userflags.value | flags, extack);
@ -1194,8 +1262,11 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
sz += tcf_action_fill_size(act);
/* Start from index 0 */
actions[i - 1] = act;
if (!tc_act_bind(flags))
tcf_action_offload_add(act, extack);
if (!tc_act_bind(flags)) {
err = tcf_action_offload_add(act, extack);
if (tc_act_skip_sw(act->tcfa_flags) && err)
goto err;
}
}
/* We have to commit them all together, because if any error happened in