cachefiles: never get a new anonymous fd if ondemand_id is valid

[ Upstream commit 4988e35e95 ]

Now every time the daemon reads an open request, it gets a new anonymous fd
and ondemand_id. With the introduction of "restore", it is possible to read
the same open request more than once, and therefore an object can have more
than one anonymous fd.

If the anonymous fd is not unique, the following concurrent sequence
results in an fd leak:

     t1     |         t2         |          t3
------------------------------------------------------------
 cachefiles_ondemand_init_object
  cachefiles_ondemand_send_req
   REQ_A = kzalloc(sizeof(*req) + data_len)
   wait_for_completion(&REQ_A->done)
            cachefiles_daemon_read
             cachefiles_ondemand_daemon_read
              REQ_A = cachefiles_ondemand_select_req
              cachefiles_ondemand_get_fd
                load->fd = fd0
                ondemand_id = object_id0
                                  ------ restore ------
                                  cachefiles_ondemand_restore
                                   // restore REQ_A
                                  cachefiles_daemon_read
                                   cachefiles_ondemand_daemon_read
                                    REQ_A = cachefiles_ondemand_select_req
                                      cachefiles_ondemand_get_fd
                                        load->fd = fd1
                                        ondemand_id = object_id1
             process_open_req(REQ_A)
             write(devfd, ("copen %u,%llu", msg->msg_id, size))
             cachefiles_ondemand_copen
              xa_erase(&cache->reqs, id)
              complete(&REQ_A->done)
   kfree(REQ_A)
                                  process_open_req(REQ_A)
                                  // copen fails due to no req
                                  // daemon close(fd1)
                                  cachefiles_ondemand_fd_release
                                   // set object closed
 -- umount --
 cachefiles_withdraw_cookie
  cachefiles_ondemand_clean_object
   cachefiles_ondemand_init_close_req
    if (!cachefiles_ondemand_object_is_open(object))
      return -ENOENT;
    // The fd0 is not closed until the daemon exits.

However, the anonymous fd holds a reference on the object, and the object
holds a reference on the cookie. So even though the cookie has been
relinquished, it will not be unhashed and freed until the daemon exits.

In fscache_hash_cookie(), when the same cookie is found in the hash list
and that cookie has the FSCACHE_COOKIE_RELINQUISHED bit set, the new
cookie waits for the old cookie to be unhashed, while the old cookie is
in turn waiting for the leaked fd to be closed. If the daemon does not
exit in time, this triggers a hung task.
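
Roughly, the collision path looks like the sketch below (a paraphrased
fragment of fscache_hash_cookie(), not the verbatim kernel code); the
wait at the end can only finish once the leaked anon fd stops pinning
the old cookie:

  hlist_bl_for_each_entry(cursor, p, h, hash_link) {
          if (!fscache_cookie_same(candidate, cursor))
                  continue;
          if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cursor->flags))
                  goto collision;         /* cookie genuinely still live */
          wait_for = cursor;              /* old cookie is dying: wait for
                                           * it to drop out of the hash */
          break;
  }
  /* The subsequent wait only completes once the old cookie is unhashed,
   * i.e. once the leaked anon fd no longer pins object and cookie. */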

To avoid this, allocate a new anonymous fd only if no anonymous fd has
been allocated (ondemand_id == 0) or if the previously allocated anonymous
fd has been closed (ondemand_id == -1). Moreover, return an error if
ondemand_id is valid, letting the daemon know that the current userland
restore logic is abnormal and needs to be checked.
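
In terms of the ondemand_id field this is a three-state check. A minimal
sketch of the policy (the real change is in cachefiles_ondemand_get_fd()
below; CACHEFILES_ONDEMAND_ID_CLOSED is assumed to be the -1 marker from
fs/cachefiles/internal.h):

  spin_lock(&object->ondemand->lock);
  if (object->ondemand->ondemand_id > 0) {
          /* An anon fd is already installed: the daemon re-read an open
           * request it had already handled. Refuse a second fd. */
          spin_unlock(&object->ondemand->lock);
          ret = -EEXIST;
  } else {
          /* ondemand_id == 0 (never opened) or
           * ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED (-1, previously
           * closed): safe to hand out a new anonymous fd. */
          object->ondemand->ondemand_id = object_id;
          spin_unlock(&object->ondemand->lock);
  }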

Fixes: c838305450 ("cachefiles: notify the user daemon when looking up cookie")
Signed-off-by: Baokun Li <libaokun1@huawei.com>
Link: https://lore.kernel.org/r/20240522114308.2402121-9-libaokun@huaweicloud.com
Acked-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Stable-dep-of: 4b4391e77a ("cachefiles: defer exposing anon_fd until after copy_to_user() succeeds")
Signed-off-by: Sasha Levin <sashal@kernel.org>

--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -14,11 +14,18 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
 					  struct file *file)
 {
 	struct cachefiles_object *object = file->private_data;
-	struct cachefiles_cache *cache = object->volume->cache;
-	struct cachefiles_ondemand_info *info = object->ondemand;
+	struct cachefiles_cache *cache;
+	struct cachefiles_ondemand_info *info;
 	int object_id;
 	struct cachefiles_req *req;
-	XA_STATE(xas, &cache->reqs, 0);
+	XA_STATE(xas, NULL, 0);
+
+	if (!object)
+		return 0;
+
+	info = object->ondemand;
+	cache = object->volume->cache;
+	xas.xa = &cache->reqs;
 
 	xa_lock(&cache->reqs);
 	spin_lock(&info->lock);
@@ -275,22 +282,39 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
 		goto err_put_fd;
 	}
 
+	spin_lock(&object->ondemand->lock);
+	if (object->ondemand->ondemand_id > 0) {
+		spin_unlock(&object->ondemand->lock);
+		/* Pair with check in cachefiles_ondemand_fd_release(). */
+		file->private_data = NULL;
+		ret = -EEXIST;
+		goto err_put_file;
+	}
 	file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
 	fd_install(fd, file);
 
 	load = (void *)req->msg.data;
 	load->fd = fd;
 	object->ondemand->ondemand_id = object_id;
+	spin_unlock(&object->ondemand->lock);
 
 	cachefiles_get_unbind_pincount(cache);
 	trace_cachefiles_ondemand_open(object, &req->msg, load);
 	return 0;
 
+err_put_file:
+	fput(file);
 err_put_fd:
 	put_unused_fd(fd);
 err_free_id:
 	xa_erase(&cache->ondemand_ids, object_id);
 err:
+	spin_lock(&object->ondemand->lock);
+	/* Avoid marking an opened object as closed. */
+	if (object->ondemand->ondemand_id <= 0)
+		cachefiles_ondemand_set_object_close(object);
+	spin_unlock(&object->ondemand->lock);
+
 	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
 	return ret;
 }
@@ -373,10 +397,8 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 
 	if (msg->opcode == CACHEFILES_OP_OPEN) {
 		ret = cachefiles_ondemand_get_fd(req);
-		if (ret) {
-			cachefiles_ondemand_set_object_close(req->object);
+		if (ret)
 			goto out;
-		}
 	}
 
 	msg->msg_id = xas.xa_index;