
binder: separate binder allocator structure from binder proc

The binder allocator is logically separate from the rest of the
binder driver. Separate the data structures now to prepare for
splitting the allocator into its own file with its own locking.
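
In short, the per-process buffer-management fields move out of struct
binder_proc into a new struct binder_alloc, which binder_proc then embeds.
A condensed sketch of the resulting layout, taken from the diff below:

struct binder_alloc {
	struct vm_area_struct *vma;		/* vma passed at mmap time */
	struct mm_struct *vma_vm_mm;		/* copy of vma->vm_mm */
	void *buffer;				/* base of the mmap'd space */
	ptrdiff_t user_buffer_offset;		/* user VA minus kernel VA */
	struct list_head buffers;		/* all buffers for this proc */
	struct rb_root free_buffers;		/* free, sorted by size */
	struct rb_root allocated_buffers;	/* in use, sorted by address */
	size_t free_async_space;		/* VA left for async buffers */
	struct page **pages;			/* one entry per mapped page */
	size_t buffer_size;			/* size of the mapped space */
};

struct binder_proc {
	/* ... unrelated fields unchanged ... */
	struct binder_alloc alloc;		/* replaces the fields above */
	struct binder_context *context;
};

Accesses change from proc->field to proc->alloc.field; only the unused
uint32_t buffer_free is removed rather than moved.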

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Todd Kjos 2017-06-29 12:01:38 -07:00 committed by Greg Kroah-Hartman
parent 00b40d6133
commit fdfb4a99b6
2 changed files with 129 additions and 85 deletions

drivers/android/binder.c

@@ -319,6 +319,41 @@ enum binder_deferred_state {
 	BINDER_DEFERRED_RELEASE = 0x04,
 };
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma:                vm_area_struct passed to mmap_handler
+ *                      (invariant after mmap)
+ * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
+ * @buffer:             base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers:            list of all buffers for this proc
+ * @free_buffers:       rb tree of buffers available for allocation
+ *                      sorted by size
+ * @allocated_buffers:  rb tree of allocated buffers sorted by address
+ * @free_async_space:   VA space available for async buffers. This is
+ *                      initialized at mmap time to 1/2 the full VA space
+ * @pages:              array of physical page addresses for each page of
+ *                      mmap'd space
+ * @buffer_size:        size of address space (could be less than requested)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers.
+ */
+struct binder_alloc {
+	struct vm_area_struct *vma;
+	struct mm_struct *vma_vm_mm;
+	void *buffer;
+	ptrdiff_t user_buffer_offset;
+	struct list_head buffers;
+	struct rb_root free_buffers;
+	struct rb_root allocated_buffers;
+	size_t free_async_space;
+	struct page **pages;
+	size_t buffer_size;
+};
 struct binder_proc {
 	struct hlist_node proc_node;
 	struct rb_root threads;
@@ -326,23 +361,11 @@ struct binder_proc {
 	struct rb_root refs_by_desc;
 	struct rb_root refs_by_node;
 	int pid;
-	struct vm_area_struct *vma;
-	struct mm_struct *vma_vm_mm;
 	struct task_struct *tsk;
 	struct files_struct *files;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
-	void *buffer;
-	ptrdiff_t user_buffer_offset;
-	struct list_head buffers;
-	struct rb_root free_buffers;
-	struct rb_root allocated_buffers;
-	size_t free_async_space;
-	struct page **pages;
-	size_t buffer_size;
-	uint32_t buffer_free;
 	struct list_head todo;
 	wait_queue_head_t wait;
 	struct binder_stats stats;
@@ -353,6 +376,7 @@ struct binder_proc {
 	int ready_threads;
 	long default_priority;
 	struct dentry *debugfs_entry;
+	struct binder_alloc alloc;
 	struct binder_context *context;
 };
@@ -485,8 +509,10 @@ static void binder_set_nice(long nice)
 static size_t binder_buffer_size(struct binder_proc *proc,
 				 struct binder_buffer *buffer)
 {
-	if (list_is_last(&buffer->entry, &proc->buffers))
-		return proc->buffer + proc->buffer_size - (void *)buffer->data;
+	if (list_is_last(&buffer->entry, &proc->alloc.buffers))
+		return proc->alloc.buffer +
+		       proc->alloc.buffer_size -
+		       (void *)buffer->data;
 	return (size_t)list_entry(buffer->entry.next,
 			struct binder_buffer, entry) - (size_t)buffer->data;
 }
@@ -494,7 +520,7 @@ static size_t binder_buffer_size(struct binder_proc *proc,
 static void binder_insert_free_buffer(struct binder_proc *proc,
 				      struct binder_buffer *new_buffer)
 {
-	struct rb_node **p = &proc->free_buffers.rb_node;
+	struct rb_node **p = &proc->alloc.free_buffers.rb_node;
 	struct rb_node *parent = NULL;
 	struct binder_buffer *buffer;
 	size_t buffer_size;
@@ -521,13 +547,13 @@ static void binder_insert_free_buffer(struct binder_proc *proc,
 			p = &parent->rb_right;
 	}
 	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+	rb_insert_color(&new_buffer->rb_node, &proc->alloc.free_buffers);
 }
 static void binder_insert_allocated_buffer(struct binder_proc *proc,
 					   struct binder_buffer *new_buffer)
 {
-	struct rb_node **p = &proc->allocated_buffers.rb_node;
+	struct rb_node **p = &proc->alloc.allocated_buffers.rb_node;
 	struct rb_node *parent = NULL;
 	struct binder_buffer *buffer;
@@ -546,18 +572,19 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
 			BUG();
 	}
 	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+	rb_insert_color(&new_buffer->rb_node, &proc->alloc.allocated_buffers);
 }
 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
 						  uintptr_t user_ptr)
 {
-	struct rb_node *n = proc->allocated_buffers.rb_node;
+	struct rb_node *n = proc->alloc.allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
 	struct binder_buffer *kern_ptr;
-	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
+	kern_ptr = (struct binder_buffer *)
+		(user_ptr - proc->alloc.user_buffer_offset -
+		 offsetof(struct binder_buffer, data));
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -598,8 +625,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 	if (mm) {
 		down_write(&mm->mmap_sem);
-		vma = proc->vma;
-		if (vma && mm != proc->vma_vm_mm) {
+		vma = proc->alloc.vma;
+		if (vma && mm != proc->alloc.vma_vm_mm) {
 			pr_err("%d: vma mm and task mm mismatch\n",
 				proc->pid);
 			vma = NULL;
@@ -618,7 +645,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+		page = &proc->alloc.pages[
+			(page_addr - proc->alloc.buffer) / PAGE_SIZE];
 		BUG_ON(*page);
 		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
@@ -637,7 +665,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 			goto err_map_kernel_failed;
 		}
 		user_page_addr =
-			(uintptr_t)page_addr + proc->user_buffer_offset;
+			(uintptr_t)page_addr + proc->alloc.user_buffer_offset;
 		ret = vm_insert_page(vma, user_page_addr, page[0]);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -655,10 +683,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 free_range:
 	for (page_addr = end - PAGE_SIZE; page_addr >= start;
 	     page_addr -= PAGE_SIZE) {
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+		page = &proc->alloc.pages[
+			(page_addr - proc->alloc.buffer) / PAGE_SIZE];
 		if (vma)
 			zap_page_range(vma, (uintptr_t)page_addr +
-				proc->user_buffer_offset, PAGE_SIZE);
+				proc->alloc.user_buffer_offset, PAGE_SIZE);
 err_vm_insert_page_failed:
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 err_map_kernel_failed:
@@ -681,7 +710,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 					      size_t extra_buffers_size,
 					      int is_async)
 {
-	struct rb_node *n = proc->free_buffers.rb_node;
+	struct rb_node *n = proc->alloc.free_buffers.rb_node;
 	struct binder_buffer *buffer;
 	size_t buffer_size;
 	struct rb_node *best_fit = NULL;
@@ -689,7 +718,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 	void *end_page_addr;
 	size_t size, data_offsets_size;
-	if (proc->vma == NULL) {
+	if (proc->alloc.vma == NULL) {
 		pr_err("%d: binder_alloc_buf, no vma\n",
 		       proc->pid);
 		return NULL;
@@ -710,7 +739,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 		return NULL;
 	}
 	if (is_async &&
-	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
+	    proc->alloc.free_async_space <
+	    size + sizeof(struct binder_buffer)) {
 		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
 			     proc->pid, size);
@@ -762,7 +792,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
 		return NULL;
-	rb_erase(best_fit, &proc->free_buffers);
+	rb_erase(best_fit, &proc->alloc.free_buffers);
 	buffer->free = 0;
 	binder_insert_allocated_buffer(proc, buffer);
 	if (buffer_size != size) {
@@ -780,10 +810,11 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->async_transaction = is_async;
 	if (is_async) {
-		proc->free_async_space -= size + sizeof(struct binder_buffer);
+		proc->alloc.free_async_space -=
+			size + sizeof(struct binder_buffer);
 		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 			     "%d: binder_alloc_buf size %zd async free %zd\n",
-			     proc->pid, size, proc->free_async_space);
+			     proc->pid, size, proc->alloc.free_async_space);
 	}
 	return buffer;
@@ -806,7 +837,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
 	int free_page_end = 1;
 	int free_page_start = 1;
-	BUG_ON(proc->buffers.next == &buffer->entry);
+	BUG_ON(proc->alloc.buffers.next == &buffer->entry);
 	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
 	BUG_ON(!prev->free);
 	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
@@ -818,7 +849,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
 			     proc->pid, buffer, prev);
 	}
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+	if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
 		next = list_entry(buffer->entry.next,
 				  struct binder_buffer, entry);
 		if (buffer_start_page(next) == buffer_end_page(buffer)) {
@@ -862,39 +893,40 @@ static void binder_free_buf(struct binder_proc *proc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < proc->buffer);
-	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+	BUG_ON((void *)buffer < proc->alloc.buffer);
+	BUG_ON((void *)buffer > proc->alloc.buffer + proc->alloc.buffer_size);
 	if (buffer->async_transaction) {
-		proc->free_async_space += size + sizeof(struct binder_buffer);
+		proc->alloc.free_async_space +=
+			size + sizeof(struct binder_buffer);
 		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 			     "%d: binder_free_buf size %zd async free %zd\n",
-			     proc->pid, size, proc->free_async_space);
+			     proc->pid, size, proc->alloc.free_async_space);
 	}
 	binder_update_page_range(proc, 0,
 		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
 		NULL);
-	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+	rb_erase(&buffer->rb_node, &proc->alloc.allocated_buffers);
 	buffer->free = 1;
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+	if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
 		struct binder_buffer *next = list_entry(buffer->entry.next,
 						struct binder_buffer, entry);
 		if (next->free) {
-			rb_erase(&next->rb_node, &proc->free_buffers);
+			rb_erase(&next->rb_node, &proc->alloc.free_buffers);
 			binder_delete_free_buffer(proc, next);
 		}
 	}
-	if (proc->buffers.next != &buffer->entry) {
+	if (proc->alloc.buffers.next != &buffer->entry) {
 		struct binder_buffer *prev = list_entry(buffer->entry.prev,
 						struct binder_buffer, entry);
 		if (prev->free) {
 			binder_delete_free_buffer(proc, buffer);
-			rb_erase(&prev->rb_node, &proc->free_buffers);
+			rb_erase(&prev->rb_node, &proc->alloc.free_buffers);
 			buffer = prev;
 		}
 	}
@@ -1532,7 +1564,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			 * back to kernel address space to access it
 			 */
 			parent_buffer = parent->buffer -
-				proc->user_buffer_offset;
+				proc->alloc.user_buffer_offset;
 			fd_buf_size = sizeof(u32) * fda->num_fds;
 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1750,7 +1782,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 	 * Since the parent was already fixed up, convert it
 	 * back to the kernel address space to access it
 	 */
-	parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+	parent_buffer = parent->buffer - target_proc->alloc.user_buffer_offset;
 	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1818,7 +1850,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
 		return -EINVAL;
 	}
 	parent_buffer = (u8 *)(parent->buffer -
-			       target_proc->user_buffer_offset);
+			       target_proc->alloc.user_buffer_offset);
 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
 	return 0;
@@ -2158,7 +2190,7 @@ static void binder_transaction(struct binder_proc *proc,
 			}
 			/* Fixup buffer pointer to target proc address space */
 			bp->buffer = (uintptr_t)sg_bufp +
-				target_proc->user_buffer_offset;
+				target_proc->alloc.user_buffer_offset;
 			sg_bufp += ALIGN(bp->length, sizeof(u64));
 			ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2920,7 +2952,7 @@ retry:
 		tr.offsets_size = t->buffer->offsets_size;
 		tr.data.ptr.buffer = (binder_uintptr_t)(
 					(uintptr_t)t->buffer->data +
-					proc->user_buffer_offset);
+					proc->alloc.user_buffer_offset);
 		tr.data.ptr.offsets = tr.data.ptr.buffer +
 					ALIGN(t->buffer->data_size,
 					      sizeof(void *));
@@ -3338,8 +3370,8 @@ static void binder_vma_close(struct vm_area_struct *vma)
 		     proc->pid, vma->vm_start, vma->vm_end,
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
-	proc->vma = NULL;
-	proc->vma_vm_mm = NULL;
+	proc->alloc.vma = NULL;
+	proc->alloc.vma_vm_mm = NULL;
 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
@@ -3382,7 +3414,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
 	mutex_lock(&binder_mmap_lock);
-	if (proc->buffer) {
+	if (proc->alloc.buffer) {
 		ret = -EBUSY;
 		failure_string = "already mapped";
 		goto err_already_mapped;
@@ -3394,56 +3426,66 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		failure_string = "get_vm_area";
 		goto err_get_vm_area_failed;
 	}
-	proc->buffer = area->addr;
-	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+	proc->alloc.buffer = area->addr;
+	proc->alloc.user_buffer_offset =
+		vma->vm_start - (uintptr_t)proc->alloc.buffer;
 	mutex_unlock(&binder_mmap_lock);
 #ifdef CONFIG_CPU_CACHE_VIPT
 	if (cache_is_vipt_aliasing()) {
-		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
-			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+		while (CACHE_COLOUR(
+				(vma->vm_start ^ (uint32_t)proc->alloc.buffer))) {
+			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+				__func__,
+				proc->pid, vma->vm_start,
+				vma->vm_end, proc->alloc.buffer);
 			vma->vm_start += PAGE_SIZE;
 		}
 	}
 #endif
-	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
-	if (proc->pages == NULL) {
+	proc->alloc.pages =
+		kzalloc(sizeof(proc->alloc.pages[0]) *
+				((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+			GFP_KERNEL);
+	if (proc->alloc.pages == NULL) {
 		ret = -ENOMEM;
 		failure_string = "alloc page array";
 		goto err_alloc_pages_failed;
 	}
-	proc->buffer_size = vma->vm_end - vma->vm_start;
+	proc->alloc.buffer_size = vma->vm_end - vma->vm_start;
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;
-	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+	if (binder_update_page_range(proc, 1, proc->alloc.buffer,
+				     proc->alloc.buffer + PAGE_SIZE, vma)) {
 		ret = -ENOMEM;
 		failure_string = "alloc small buf";
 		goto err_alloc_small_buf_failed;
 	}
-	buffer = proc->buffer;
-	INIT_LIST_HEAD(&proc->buffers);
-	list_add(&buffer->entry, &proc->buffers);
+	buffer = proc->alloc.buffer;
+	INIT_LIST_HEAD(&proc->alloc.buffers);
+	list_add(&buffer->entry, &proc->alloc.buffers);
 	buffer->free = 1;
 	binder_insert_free_buffer(proc, buffer);
-	proc->free_async_space = proc->buffer_size / 2;
+	proc->alloc.free_async_space = proc->alloc.buffer_size / 2;
 	barrier();
 	proc->files = get_files_struct(current);
-	proc->vma = vma;
-	proc->vma_vm_mm = vma->vm_mm;
+	proc->alloc.vma = vma;
+	proc->alloc.vma_vm_mm = vma->vm_mm;
-	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
-		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+	/*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
+	 *	proc->pid, vma->vm_start, vma->vm_end, proc->alloc.buffer);
+	 */
 	return 0;
 err_alloc_small_buf_failed:
-	kfree(proc->pages);
-	proc->pages = NULL;
+	kfree(proc->alloc.pages);
+	proc->alloc.pages = NULL;
 err_alloc_pages_failed:
 	mutex_lock(&binder_mmap_lock);
-	vfree(proc->buffer);
-	proc->buffer = NULL;
+	vfree(proc->alloc.buffer);
+	proc->alloc.buffer = NULL;
 err_get_vm_area_failed:
 err_already_mapped:
 	mutex_unlock(&binder_mmap_lock);
@@ -3595,7 +3637,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 	int threads, nodes, incoming_refs, outgoing_refs, buffers,
 		active_transactions, page_count;
-	BUG_ON(proc->vma);
+	BUG_ON(proc->alloc.vma);
 	BUG_ON(proc->files);
 	hlist_del(&proc->proc_node);
@@ -3642,7 +3684,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 	binder_release_work(&proc->delivered_death);
 	buffers = 0;
-	while ((n = rb_first(&proc->allocated_buffers))) {
+	while ((n = rb_first(&proc->alloc.allocated_buffers))) {
 		struct binder_buffer *buffer;
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -3663,25 +3705,25 @@ static void binder_deferred_release(struct binder_proc *proc)
 	binder_stats_deleted(BINDER_STAT_PROC);
 	page_count = 0;
-	if (proc->pages) {
+	if (proc->alloc.pages) {
 		int i;
-		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+		for (i = 0; i < proc->alloc.buffer_size / PAGE_SIZE; i++) {
 			void *page_addr;
-			if (!proc->pages[i])
+			if (!proc->alloc.pages[i])
 				continue;
-			page_addr = proc->buffer + i * PAGE_SIZE;
+			page_addr = proc->alloc.buffer + i * PAGE_SIZE;
 			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				     "%s: %d: page %d at %p not freed\n",
 				     __func__, proc->pid, i, page_addr);
 			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-			__free_page(proc->pages[i]);
+			__free_page(proc->alloc.pages[i]);
 			page_count++;
 		}
-		kfree(proc->pages);
-		vfree(proc->buffer);
+		kfree(proc->alloc.pages);
+		vfree(proc->alloc.buffer);
 	}
 	put_task_struct(proc->tsk);
@@ -3911,7 +3953,8 @@ static void print_binder_proc(struct seq_file *m,
 		print_binder_ref(m, rb_entry(n, struct binder_ref,
 					     rb_node_desc));
 	}
-	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->alloc.allocated_buffers);
+	     n != NULL; n = rb_next(n))
 		print_binder_buffer(m, " buffer",
 				    rb_entry(n, struct binder_buffer, rb_node));
 	list_for_each_entry(w, &proc->todo, entry)
@@ -4028,7 +4071,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 			" ready threads %d\n"
 			" free async space %zd\n", proc->requested_threads,
 			proc->requested_threads_started, proc->max_threads,
-			proc->ready_threads, proc->free_async_space);
+			proc->ready_threads, proc->alloc.free_async_space);
 	count = 0;
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
 		count++;
@@ -4046,7 +4089,8 @@ static void print_binder_proc_stats(struct seq_file *m,
 	seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
 	count = 0;
-	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->alloc.allocated_buffers);
+	     n != NULL; n = rb_next(n))
 		count++;
 	seq_printf(m, " buffers: %d\n", count);

drivers/android/binder_trace.h

@@ -280,7 +280,7 @@ TRACE_EVENT(binder_update_page_range,
 	TP_fast_assign(
 		__entry->proc = proc->pid;
 		__entry->allocate = allocate;
-		__entry->offset = start - proc->buffer;
+		__entry->offset = start - proc->alloc.buffer;
 		__entry->size = end - start;
 	),
 	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",