mm/damon/core: make damon_start() waits until kdamond_fn() starts

[ Upstream commit 6376a82459 ]

The cleanup tasks of kdamond threads, including the reset of the
corresponding DAMON context's ->kdamond field and the decrement of the
global nr_running_ctxs counter, are supposed to be executed by
kdamond_fn().  However, commit 0f91d13366 ("mm/damon: simplify stop
mechanism") made neither damon_start() nor damon_stop() ensure that the
corresponding kdamond has started executing kdamond_fn().

As a result, the cleanup can be skipped if damon_stop() is called
quickly enough after the preceding damon_start().  In particular, the
skipped reset of ->kdamond can cause a use-after-free.

Fix it by making damon_start() wait until kdamond_fn() has started
executing.
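
The handshake can be illustrated with a minimal userspace sketch (this
is not DAMON code): POSIX threads and a semaphore stand in for the
kthread and the kernel's struct completion, and worker_ctx,
worker_start() and worker_fn() are hypothetical names used only for
illustration.

/*
 * Minimal userspace sketch of the handshake this patch adds.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct worker_ctx {
	sem_t started;		/* plays the role of ctx->kdamond_started */
	pthread_t worker;	/* plays the role of ctx->kdamond */
};

static void *worker_fn(void *data)
{
	struct worker_ctx *ctx = data;

	/* Signal first thing, as kdamond_fn() now does with complete(). */
	sem_post(&ctx->started);

	/* ... main loop and cleanup of the worker would run here ... */
	return NULL;
}

static int worker_start(struct worker_ctx *ctx)
{
	sem_init(&ctx->started, 0, 0);
	if (pthread_create(&ctx->worker, NULL, worker_fn, ctx))
		return -1;

	/*
	 * Do not return before worker_fn() has really begun, mirroring
	 * wait_for_completion(&ctx->kdamond_started) in __damon_start().
	 * A stop issued right after this cannot race with a worker that
	 * never ran and therefore never did its cleanup.
	 */
	sem_wait(&ctx->started);
	return 0;
}

int main(void)
{
	struct worker_ctx ctx;

	if (worker_start(&ctx))
		return 1;
	pthread_join(ctx.worker, NULL);
	puts("worker started, ran, and was reaped");
	return 0;
}

As in __damon_start(), the key point is that worker_start() does not
return until worker_fn() has signalled that it is running, so a
subsequent stop cannot race with a worker that never executed.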

Link: https://lkml.kernel.org/r/20231208175018.63880-1-sj@kernel.org
Fixes: 0f91d13366 ("mm/damon: simplify stop mechanism")
Signed-off-by: SeongJae Park <sj@kernel.org>
Reported-by: Jakub Acs <acsjakub@amazon.de>
Cc: Changbin Du <changbin.du@intel.com>
Cc: Jakub Acs <acsjakub@amazon.de>
Cc: <stable@vger.kernel.org> # 5.15.x
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Author: SeongJae Park, 2023-12-08 17:50:18 +00:00
Committed by: Greg Kroah-Hartman
parent c708a5e51b
commit e93bcaebda
2 changed files with 8 additions and 0 deletions

include/linux/damon.h

@@ -534,6 +534,8 @@ struct damon_ctx {
 	 * update
 	 */
 	unsigned long next_ops_update_sis;
+	/* for waiting until the execution of the kdamond_fn is started */
+	struct completion kdamond_started;
 
 /* public: */
 	struct task_struct *kdamond;

mm/damon/core.c

@@ -423,6 +423,8 @@ struct damon_ctx *damon_new_ctx(void)
 	if (!ctx)
 		return NULL;
 
+	init_completion(&ctx->kdamond_started);
+
 	ctx->attrs.sample_interval = 5 * 1000;
 	ctx->attrs.aggr_interval = 100 * 1000;
 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
@@ -636,11 +638,14 @@ static int __damon_start(struct damon_ctx *ctx)
 	mutex_lock(&ctx->kdamond_lock);
 	if (!ctx->kdamond) {
 		err = 0;
+		reinit_completion(&ctx->kdamond_started);
 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
 				nr_running_ctxs);
 		if (IS_ERR(ctx->kdamond)) {
 			err = PTR_ERR(ctx->kdamond);
 			ctx->kdamond = NULL;
+		} else {
+			wait_for_completion(&ctx->kdamond_started);
 		}
 	}
 	mutex_unlock(&ctx->kdamond_lock);
@@ -1347,6 +1352,7 @@ static int kdamond_fn(void *data)
 
 	pr_debug("kdamond (%d) starts\n", current->pid);
 
+	complete(&ctx->kdamond_started);
 	kdamond_init_intervals_sis(ctx);
 
 	if (ctx->ops.init)