From c25ec972c9c667ed18ee32acea11404f017a5693 Mon Sep 17 00:00:00 2001
From: David Anderson
Date: Fri, 14 Feb 2020 12:44:48 -0800
Subject: [PATCH] AOSP: e2fsdroid: Don't skip unusable blocks in BaseFS.

Currently, basefs_allocator will iterate through blocks owned by an
inode until it finds a block that is free. This effectively ignores the
logical-to-physical block mapping, which can lead to a bigger delta in
the final image.

An example of how this can happen is if the BaseFS has a deduplicated
block (D) that is not deduplicated in the new image:

Old image: 1 2 3 D 4 5
New image: 1 2 3 ? 4 5

If the allocator sees that "D" is not usable and skips to block "4", we
will have a non-ideal assignment.

Bad image: 1 2 3 4 5 ?

This patch refactors get_next_block() to acquire at most one block. It
is called a single time, and then only called in a loop if absolutely
no blocks can be acquired from anywhere else.

In a Virtual A/B simulation, this reduces the COW snapshot size by
about 90MB.

Bug: 139201772
Test: manual test
Change-Id: I354f0dee1ee191dba0e1f90491ed591dba388f7f

From AOSP commit: a495b54f89b2ec0e46be8e3564e4852c6434687c
---
 contrib/android/basefs_allocator.c | 66 ++++++++++++++++++------------
 1 file changed, 39 insertions(+), 27 deletions(-)

diff --git a/contrib/android/basefs_allocator.c b/contrib/android/basefs_allocator.c
index 2c5b92b4..5c92ddc2 100644
--- a/contrib/android/basefs_allocator.c
+++ b/contrib/android/basefs_allocator.c
@@ -216,29 +216,30 @@ out:
 }
 
 /* Try and acquire the next usable block from the Base FS map. */
-static int get_next_block(ext2_filsys fs, struct base_fs_allocator *allocator,
-			  struct block_range_list* list, blk64_t *ret)
+static errcode_t get_next_block(ext2_filsys fs, struct base_fs_allocator *allocator,
+				struct block_range_list* list, blk64_t *ret)
 {
 	blk64_t block;
 	ext2fs_block_bitmap exclusive_map = allocator->exclusive_block_map;
 	ext2fs_block_bitmap dedup_map = allocator->dedup_block_map;
 
-	while (list->head) {
-		block = consume_next_block(list);
-		if (block >= ext2fs_blocks_count(fs->super))
-			continue;
-		if (ext2fs_test_block_bitmap2(exclusive_map, block)) {
-			ext2fs_unmark_block_bitmap2(exclusive_map, block);
-			*ret = block;
-			return 0;
-		}
-		if (ext2fs_test_block_bitmap2(dedup_map, block)) {
-			ext2fs_unmark_block_bitmap2(dedup_map, block);
-			*ret = block;
-			return 0;
-		}
+	if (!list->head)
+		return EXT2_ET_BLOCK_ALLOC_FAIL;
+
+	block = consume_next_block(list);
+	if (block >= ext2fs_blocks_count(fs->super))
+		return EXT2_ET_BLOCK_ALLOC_FAIL;
+	if (ext2fs_test_block_bitmap2(exclusive_map, block)) {
+		ext2fs_unmark_block_bitmap2(exclusive_map, block);
+		*ret = block;
+		return 0;
 	}
-	return -1;
+	if (ext2fs_test_block_bitmap2(dedup_map, block)) {
+		ext2fs_unmark_block_bitmap2(dedup_map, block);
+		*ret = block;
+		return 0;
+	}
+	return EXT2_ET_BLOCK_ALLOC_FAIL;
 }
 
 /*
@@ -299,17 +300,28 @@ static errcode_t basefs_block_allocator(ext2_filsys fs, blk64_t goal,
 		ext2fs_mark_block_bitmap2(fs->block_map, *ret);
 		return 0;
 	}
-	if (retval == EXT2_ET_BLOCK_ALLOC_FAIL) {
-		/* Try to steal a block from the dedup pool. */
-		retval = ext2fs_find_first_set_block_bitmap2(dedup_map,
-				fs->super->s_first_data_block,
-				ext2fs_blocks_count(fs->super) - 1, ret);
-		if (!retval) {
-			ext2fs_unmark_block_bitmap2(dedup_map, *ret);
-			return 0;
-		}
+	if (retval != EXT2_ET_BLOCK_ALLOC_FAIL)
+		return retval;
+
+	/* Try to steal a block from the dedup pool. */
+	retval = ext2fs_find_first_set_block_bitmap2(dedup_map,
+			fs->super->s_first_data_block,
+			ext2fs_blocks_count(fs->super) - 1, ret);
+	if (!retval) {
+		ext2fs_unmark_block_bitmap2(dedup_map, *ret);
+		return 0;
 	}
-	return retval;
+
+	/*
+	 * As a last resort, take any block from our file's list. This
+	 * risks bloating the diff, but means we are more likely to
+	 * successfully build an image.
+	 */
+	while (e->blocks.head) {
+		if (!get_next_block(fs, allocator, &e->blocks, ret))
+			return 0;
+	}
+	return EXT2_ET_BLOCK_ALLOC_FAIL;
 }
 
 void base_fs_alloc_cleanup(ext2_filsys fs)
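
For anyone who wants to poke at the allocation policy outside of e2fsprogs,
below is a minimal, self-contained C sketch of it. It is not part of the
patch, and toy_get_next_block(), struct toy_list, and the single usable[]
array are hypothetical stand-ins for get_next_block(), struct
block_range_list, and the exclusive/dedup block bitmaps. It reproduces the
commit message's example: the file wanted physical blocks 1-6, block 4 is
unavailable (it was deduplicated in the old image), and the one-block-per-call
contract keeps logical positions 4 and 5 on their old physical blocks,
yielding the good "1 2 3 ? 4 5" assignment.

#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 16

/* Hypothetical stand-in for struct block_range_list: the physical blocks
 * this file occupied in the old image, in logical order. */
struct toy_list {
	const unsigned *blocks;
	unsigned len;
	unsigned pos;
};

static bool toy_list_empty(const struct toy_list *l)
{
	return l->pos >= l->len;
}

static unsigned toy_consume_next_block(struct toy_list *l)
{
	return l->blocks[l->pos++];
}

/*
 * Mirrors the patched get_next_block(): consume exactly one list entry
 * per call, and fail if that specific block is unusable. Failure no
 * longer implies the list has been drained; the caller decides whether
 * to fall back to another allocation source.
 */
static int toy_get_next_block(bool usable[], struct toy_list *l, unsigned *ret)
{
	unsigned block;

	if (toy_list_empty(l))
		return -1;
	block = toy_consume_next_block(l);
	if (block >= NBLOCKS || !usable[block])
		return -1;
	usable[block] = false;
	*ret = block;
	return 0;
}

int main(void)
{
	/* BaseFS mapping from the old image: the file's fourth block
	 * (physical 4 here) is unavailable in the new image. */
	const unsigned wanted[] = { 1, 2, 3, 4, 5, 6 };
	struct toy_list list = { wanted, 6, 0 };
	bool usable[NBLOCKS];
	unsigned i, out, next_free = 10;	/* fresh blocks start at 10 */

	for (i = 0; i < NBLOCKS; i++)
		usable[i] = (i != 4);

	for (i = 0; i < 6; i++) {
		if (toy_get_next_block(usable, &list, &out) == 0)
			printf("logical %u -> physical %u (BaseFS)\n", i, out);
		else
			/* Stand-in for the global allocator and the other
			 * fallbacks in basefs_block_allocator(). */
			printf("logical %u -> physical %u (fresh)\n",
			       i, next_free++);
	}
	return 0;
}

This prints 1, 2, 3, fresh, 5, 6 for logical positions 0-5. Under the old
loop-until-free behavior, the call for logical position 3 would have kept
consuming entries until it found a free one and returned physical 5; every
later position then shifts by one, which is exactly the "1 2 3 4 5 ?" bad
image described in the commit message.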