
binder: fix use-after-free due to ksys_close() during fdget()

Commit 44d8047f1d ("binder: use standard functions to allocate fds")
exposed a pre-existing issue in the binder driver.

fdget() is used in ksys_ioctl() as a performance optimization.
One of the rules associated with fdget() is that ksys_close() must
not be called between the fdget() and the fdput(). There is a case
in the binder driver where this requirement is not met, which can
cause the file's reference count to drop to 0 while the file is still
in use. This can result in a use-after-free or other issues.
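
For illustration, the fdget()/fdput() pattern in ksys_ioctl() looks
roughly like the sketch below (simplified, not the exact fs/ioctl.c
source). When the descriptor table is not shared, fdget() may hand back
the struct file without taking an extra reference, relying on the fd
table's reference staying in place until fdput():

int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	int error;
	struct fd f = fdget(fd);	/* may only borrow the fd table's reference */

	if (!f.file)
		return -EBADF;
	error = security_file_ioctl(f.file, cmd, arg);
	if (!error)
		error = do_vfs_ioctl(f.file, fd, cmd, arg);
	/*
	 * If the ioctl handler called ksys_close() on this fd, the fd
	 * table may already have dropped the only real reference, so
	 * f.file can point at freed memory by the time we get here.
	 */
	fdput(f);
	return error;
}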

If userspace has passed a file descriptor for the binder driver using
a BINDER_TYPE_FDA object, then ksys_close() is called on it when
handling a binder_ioctl(BC_FREE_BUFFER) command. This violates
the assumptions for using fdget().
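
Concretely, the offending path is the BINDER_TYPE_FDA branch of
binder_transaction_buffer_release() (binder_apply_fd_fixups() has the
same problem for a single fd). Condensed from the hunks further below,
the pre-patch code was effectively:

	case BINDER_TYPE_FDA: {
		...
		fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
		for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
			ksys_close(fd_array[fd_index]);	/* may close binder's own fd */
	} break;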

The problem is fixed by deferring the close using task_work_add(). A
new variant of __close_fd() was created that removes the fd from the
descriptor table and returns the struct file with a reference held.
The final fput() is then deferred instead of calling ksys_close().
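
In outline, the new helper binder_deferred_fd_close() added below does
the following (condensed from the hunks that follow; see the diff for
the full code):

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	...
	init_task_work(&twcb->twork, binder_do_fd_close);	/* callback just does fput(twcb->file) + kfree(twcb) */
	__close_fd_get_file(fd, &twcb->file);			/* unhash the fd and take a ref on the file */
	if (twcb->file)
		task_work_add(current, &twcb->twork, true);	/* final fput() runs after binder_ioctl() returns */
	else
		kfree(twcb);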

Fixes: 44d8047f1d ("binder: use standard functions to allocate fds")
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Todd Kjos <tkjos@google.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Todd Kjos 2018-12-14 15:58:21 -08:00 committed by Greg Kroah-Hartman
parent 2701e804f0
commit 80cd795630
3 changed files with 91 additions and 2 deletions

drivers/android/binder.c

@@ -72,6 +72,7 @@
 #include <linux/spinlock.h>
 #include <linux/ratelimit.h>
 #include <linux/syscalls.h>
+#include <linux/task_work.h>
 #include <uapi/linux/android/binder.h>
@@ -2170,6 +2171,64 @@ static bool binder_validate_fixup(struct binder_buffer *b,
 	return (fixup_offset >= last_min_offset);
 }
 
+/**
+ * struct binder_task_work_cb - for deferred close
+ *
+ * @twork:                callback_head for task work
+ * @file:                 file to close
+ *
+ * Structure to pass task work to be handled after
+ * returning from binder_ioctl() via task_work_add().
+ */
+struct binder_task_work_cb {
+	struct callback_head twork;
+	struct file *file;
+};
+
+/**
+ * binder_do_fd_close() - close the file scheduled for deferred close
+ * @twork:	callback head for task work
+ *
+ * It is not safe to call ksys_close() during the binder_ioctl()
+ * function if there is a chance that binder's own file descriptor
+ * might be closed. This is to meet the requirements for using
+ * fdget() (see comments for __fget_light()). Therefore use
+ * task_work_add() to schedule the close operation once we have
+ * returned from binder_ioctl(). This function is a callback
+ * for that mechanism and does the actual fput() on the given
+ * file.
+ */
+static void binder_do_fd_close(struct callback_head *twork)
+{
+	struct binder_task_work_cb *twcb = container_of(twork,
+			struct binder_task_work_cb, twork);
+
+	fput(twcb->file);
+	kfree(twcb);
+}
+
+/**
+ * binder_deferred_fd_close() - schedule a close for the given file-descriptor
+ * @fd:		file-descriptor to close
+ *
+ * See comments in binder_do_fd_close(). This function is used to schedule
+ * a file-descriptor to be closed after returning from binder_ioctl().
+ */
+static void binder_deferred_fd_close(int fd)
+{
+	struct binder_task_work_cb *twcb;
+
+	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
+	if (!twcb)
+		return;
+	init_task_work(&twcb->twork, binder_do_fd_close);
+	__close_fd_get_file(fd, &twcb->file);
+	if (twcb->file)
+		task_work_add(current, &twcb->twork, true);
+	else
+		kfree(twcb);
+}
+
 static void binder_transaction_buffer_release(struct binder_proc *proc,
 					      struct binder_buffer *buffer,
 					      binder_size_t *failed_at)
@@ -2309,7 +2368,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 		}
 		fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
 		for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
-			ksys_close(fd_array[fd_index]);
+			binder_deferred_fd_close(fd_array[fd_index]);
 	} break;
 	default:
 		pr_err("transaction release %d bad object type %x\n",
@@ -3928,7 +3987,7 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
 		} else if (ret) {
 			u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
 
-			ksys_close(*fdp);
+			binder_deferred_fd_close(*fdp);
 		}
 		list_del(&fixup->fixup_entry);
 		kfree(fixup);

fs/file.c

@@ -640,6 +640,35 @@ out_unlock:
 }
 EXPORT_SYMBOL(__close_fd); /* for ksys_close() */
 
+/*
+ * variant of __close_fd that gets a ref on the file for later fput
+ */
+int __close_fd_get_file(unsigned int fd, struct file **res)
+{
+	struct files_struct *files = current->files;
+	struct file *file;
+	struct fdtable *fdt;
+
+	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
+	if (fd >= fdt->max_fds)
+		goto out_unlock;
+	file = fdt->fd[fd];
+	if (!file)
+		goto out_unlock;
+	rcu_assign_pointer(fdt->fd[fd], NULL);
+	__put_unused_fd(files, fd);
+	spin_unlock(&files->file_lock);
+	get_file(file);
+	*res = file;
+	return filp_close(file, files);
+
+out_unlock:
+	spin_unlock(&files->file_lock);
+	*res = NULL;
+	return -ENOENT;
+}
+
 void do_close_on_exec(struct files_struct *files)
 {
 	unsigned i;

include/linux/fdtable.h

@@ -121,6 +121,7 @@ extern void __fd_install(struct files_struct *files,
 		unsigned int fd, struct file *file);
 extern int __close_fd(struct files_struct *files,
 		unsigned int fd);
+extern int __close_fd_get_file(unsigned int fd, struct file **res);
 
 extern struct kmem_cache *files_cachep;