bpf: introduce update_effective_progs()
__cgroup_bpf_attach() and __cgroup_bpf_detach() contain a good amount of duplicated code, which can be eliminated by introducing the update_effective_progs() helper function.

update_effective_progs() calls compute_effective_progs() and then, on success, calls activate_effective_progs() for each descendant cgroup. On failure (OOM), it releases the allocated prog arrays and returns the error code.

Signed-off-by: Roman Gushchin <guro@fb.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 0c26159352
commit 85fc4b16aa
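Before reading the full diff, the following is a minimal user-space sketch of the pattern the new helper applies to every descendant cgroup: stage new state for each node, activate everything only once all allocations have succeeded, and otherwise free whatever was staged. The names here (struct node, prepare(), update_all()) are illustrative only and are not kernel APIs; the real helper operates on struct cgroup and bpf_prog_array objects as shown in the diff below.

/*
 * Toy model of update_effective_progs()'s two-phase update:
 * stage, then activate, or free the staged state on failure.
 * Illustrative user-space code only -- not kernel source.
 */
#include <stdlib.h>

struct node {
	int *inactive;	/* staged state, analogous to desc->bpf.inactive */
	int *active;	/* committed state, analogous to the effective array */
};

static int prepare(struct node *n)
{
	n->inactive = malloc(sizeof(*n->inactive));
	if (!n->inactive)
		return -1;	/* stands in for -ENOMEM */
	*n->inactive = 42;
	return 0;
}

static int update_all(struct node *nodes, int count)
{
	int i, err = 0;

	/* phase 1: stage new state for every node, bail out on first failure */
	for (i = 0; i < count; i++) {
		err = prepare(&nodes[i]);
		if (err)
			goto cleanup;
	}

	/* phase 2: every allocation succeeded, so activation cannot fail */
	for (i = 0; i < count; i++) {
		free(nodes[i].active);
		nodes[i].active = nodes[i].inactive;
		nodes[i].inactive = NULL;
	}

	return 0;

cleanup:
	/* free whatever was staged but never activated */
	for (i = 0; i < count; i++) {
		free(nodes[i].inactive);
		nodes[i].inactive = NULL;
	}

	return err;
}

int main(void)
{
	struct node nodes[3] = { 0 };

	return update_all(nodes, 3) ? EXIT_FAILURE : EXIT_SUCCESS;
}

In the kernel code below, phase 2 corresponds to activate_effective_progs() installing the newly built array, and the cleanup path to bpf_prog_array_free() on desc->bpf.inactive.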
@@ -177,6 +177,45 @@ cleanup:
 	return -ENOMEM;
 }
 
+static int update_effective_progs(struct cgroup *cgrp,
+				  enum bpf_attach_type type)
+{
+	struct cgroup_subsys_state *css;
+	int err;
+
+	/* allocate and recompute effective prog arrays */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
+		if (err)
+			goto cleanup;
+	}
+
+	/* all allocations were successful. Activate all prog arrays */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		activate_effective_progs(desc, type, desc->bpf.inactive);
+		desc->bpf.inactive = NULL;
+	}
+
+	return 0;
+
+cleanup:
+	/* oom while computing effective. Free all computed effective arrays
+	 * since they were not activated
+	 */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		bpf_prog_array_free(desc->bpf.inactive);
+		desc->bpf.inactive = NULL;
+	}
+
+	return err;
+}
+
 #define BPF_CGROUP_MAX_PROGS 64
 
 /**
@@ -194,7 +233,6 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 	struct list_head *progs = &cgrp->bpf.progs[type];
 	struct bpf_prog *old_prog = NULL;
 	struct bpf_cgroup_storage *storage, *old_storage = NULL;
-	struct cgroup_subsys_state *css;
 	struct bpf_prog_list *pl;
 	bool pl_was_allocated;
 	int err;
@@ -261,22 +299,9 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 
 	cgrp->bpf.flags[type] = flags;
 
-	/* allocate and recompute effective prog arrays */
-	css_for_each_descendant_pre(css, &cgrp->self) {
-		struct cgroup *desc = container_of(css, struct cgroup, self);
-
-		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
-		if (err)
-			goto cleanup;
-	}
-
-	/* all allocations were successful. Activate all prog arrays */
-	css_for_each_descendant_pre(css, &cgrp->self) {
-		struct cgroup *desc = container_of(css, struct cgroup, self);
-
-		activate_effective_progs(desc, type, desc->bpf.inactive);
-		desc->bpf.inactive = NULL;
-	}
+	err = update_effective_progs(cgrp, type);
+	if (err)
+		goto cleanup;
 
 	static_branch_inc(&cgroup_bpf_enabled_key);
 	if (old_storage)
@@ -289,16 +314,6 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 	return 0;
 
 cleanup:
-	/* oom while computing effective. Free all computed effective arrays
-	 * since they were not activated
-	 */
-	css_for_each_descendant_pre(css, &cgrp->self) {
-		struct cgroup *desc = container_of(css, struct cgroup, self);
-
-		bpf_prog_array_free(desc->bpf.inactive);
-		desc->bpf.inactive = NULL;
-	}
-
 	/* and cleanup the prog list */
 	pl->prog = old_prog;
 	bpf_cgroup_storage_free(pl->storage);
@@ -326,7 +341,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 	struct list_head *progs = &cgrp->bpf.progs[type];
 	u32 flags = cgrp->bpf.flags[type];
 	struct bpf_prog *old_prog = NULL;
-	struct cgroup_subsys_state *css;
 	struct bpf_prog_list *pl;
 	int err;
 
@@ -365,22 +379,9 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 		pl->prog = NULL;
 	}
 
-	/* allocate and recompute effective prog arrays */
-	css_for_each_descendant_pre(css, &cgrp->self) {
-		struct cgroup *desc = container_of(css, struct cgroup, self);
-
-		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
-		if (err)
-			goto cleanup;
-	}
-
-	/* all allocations were successful. Activate all prog arrays */
-	css_for_each_descendant_pre(css, &cgrp->self) {
-		struct cgroup *desc = container_of(css, struct cgroup, self);
-
-		activate_effective_progs(desc, type, desc->bpf.inactive);
-		desc->bpf.inactive = NULL;
-	}
+	err = update_effective_progs(cgrp, type);
+	if (err)
+		goto cleanup;
 
 	/* now can actually delete it from this cgroup list */
 	list_del(&pl->node);
@@ -396,16 +397,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 	return 0;
 
 cleanup:
-	/* oom while computing effective. Free all computed effective arrays
-	 * since they were not activated
-	 */
-	css_for_each_descendant_pre(css, &cgrp->self) {
-		struct cgroup *desc = container_of(css, struct cgroup, self);
-
-		bpf_prog_array_free(desc->bpf.inactive);
-		desc->bpf.inactive = NULL;
-	}
-
 	/* and restore back old_prog */
 	pl->prog = old_prog;
 	return err;