btrfs: reorder btrfs_transaction members for better packing
There are now 20 bytes of holes, we can reduce that to 4 by minor changes. Moving 'aborted' to the status and flags is also more logical, similar for num_dirty_bgs. The size goes from 432 to 416.

Reviewed-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 165c8b022c
commit 5302e08964
@@ -58,6 +58,7 @@ struct btrfs_transaction {

 	/* Be protected by fs_info->trans_lock when we want to change it. */
 	enum btrfs_trans_state state;
+	int aborted;
 	struct list_head list;
 	struct extent_io_tree dirty_pages;
 	unsigned long start_time;
@@ -70,7 +71,6 @@ struct btrfs_transaction {
 	struct list_head dirty_bgs;
 	struct list_head io_bgs;
 	struct list_head dropped_roots;
-	unsigned int num_dirty_bgs;

 	/*
 	 * we need to make sure block group deletion doesn't race with
@@ -79,11 +79,11 @@ struct btrfs_transaction {
 	 */
 	struct mutex cache_write_mutex;
 	spinlock_t dirty_bgs_lock;
+	unsigned int num_dirty_bgs;
 	/* Protected by spin lock fs_info->unused_bgs_lock. */
 	struct list_head deleted_bgs;
 	spinlock_t dropped_roots_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
-	int aborted;
 	struct btrfs_fs_info *fs_info;
 };
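Note: hole and size figures like the ones quoted in the commit message are typically obtained with pahole (from the dwarves package). The following is a minimal sketch of the packing technique itself, assuming a typical 64-bit ABI; the struct names and members are illustrative only and are not the actual btrfs_transaction layout.

/*
 * Illustration of why member order matters: a 4-byte int placed between
 * 8-byte members leaves a 4-byte hole, while grouping the two ints
 * together lets them share one 8-byte slot.
 */
#include <stdio.h>

struct with_holes {
	void *a;	/* 8 bytes */
	int x;		/* 4 bytes + 4-byte hole */
	void *b;	/* 8 bytes */
	int y;		/* 4 bytes + 4 bytes tail padding */
};			/* typically 32 bytes total */

struct packed_better {
	void *a;	/* 8 bytes */
	void *b;	/* 8 bytes */
	int x;		/* 4 bytes */
	int y;		/* 4 bytes, no hole */
};			/* typically 24 bytes total */

int main(void)
{
	printf("with_holes: %zu, packed_better: %zu\n",
	       sizeof(struct with_holes), sizeof(struct packed_better));
	return 0;
}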