On Mon, Jan 11, 2021 at 11:23:15PM +0100, David Sterba wrote:
> On Wed, Dec 16, 2020 at 11:22:15AM -0500, Josef Bacik wrote:
> > --- a/fs/btrfs/relocation.c
> > +++ b/fs/btrfs/relocation.c
> > @@ -98,6 +98,7 @@ struct tree_block {
> >  		u64 bytenr;
> >  	}; /* Use rb_simple_node for search/insert */
> >  	struct btrfs_key key;
> > +	u64 owner;
> >  	unsigned int level:8;
> >  	unsigned int key_ready:1;
> 
> This would probably lead to bad packing, key is 17 bytes and placing
> u64 after that adds 7 bytes for proper alignment. The bitfield members
> following the key are aligned to a byte so it would work if owner is
> before key.
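To make the quoted packing argument easy to check outside of pahole, here
is a small userspace sketch (mock types only, not the kernel structs; the
*_mock and tb_* names are made up) that mimics the relevant sizes on
x86_64: struct rb_node as 24 bytes with 8-byte alignment and
struct btrfs_key as a packed 17-byte struct.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for struct rb_node: 24 bytes, 8-byte aligned on x86_64. */
struct rb_node_mock {
	unsigned long parent_color;
	struct rb_node_mock *rb_right;
	struct rb_node_mock *rb_left;
} __attribute__((aligned(sizeof(long))));

/* Stand-in for struct btrfs_key: packed, 17 bytes. */
struct btrfs_key_mock {
	uint64_t objectid;
	uint8_t type;
	uint64_t offset;
} __attribute__((__packed__));

/* Ordering as in the posted patch: owner after the 17-byte key. */
struct tb_owner_after_key {
	struct {
		struct rb_node_mock rb_node;
		uint64_t bytenr;
	};
	struct btrfs_key_mock key;	/* offset 32, ends at byte 49 */
	uint64_t owner;			/* aligned up to 56: 7-byte hole */
	unsigned int level:8;		/* starts a new word at 64 */
	unsigned int key_ready:1;
};

/* Ordering suggested above: owner before key. */
struct tb_owner_before_key {
	struct {
		struct rb_node_mock rb_node;
		uint64_t bytenr;
	};
	uint64_t owner;			/* offset 32, no hole */
	struct btrfs_key_mock key;	/* offset 40, ends at byte 57 */
	unsigned int level:8;		/* shares the word at offset 56 */
	unsigned int key_ready:1;
};

int main(void)
{
	printf("owner after key:  size %zu, owner at offset %zu\n",
	       sizeof(struct tb_owner_after_key),
	       offsetof(struct tb_owner_after_key, owner));
	printf("owner before key: size %zu, key at offset %zu\n",
	       sizeof(struct tb_owner_before_key),
	       offsetof(struct tb_owner_before_key, key));
	return 0;
}

Built with gcc on x86_64 this prints size 72 with owner at offset 56 for
the first ordering (the 7-byte hole after key pushes the bitfields onto a
second cacheline) and size 64 for the second.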
Easy fix and size 64 is also more cache friendly.

@@ -12256,22 +12256,18 @@ struct tree_block {
 		struct rb_node     rb_node __attribute__((__aligned__(8))); /*     0    24 */
 		u64                bytenr;                               /*    24     8 */
 	} __attribute__((__aligned__(8))) __attribute__((__aligned__(8))); /*     0    32 */
-	struct btrfs_key           key;                                  /*    32    17 */
+	u64                        owner;                                /*    32     8 */
+	struct btrfs_key           key;                                  /*    40    17 */
 
-	/* XXX 7 bytes hole, try to pack */
+	/* Bitfield combined with next fields */
 
-	u64                        owner;                                /*    56     8 */
-	/* --- cacheline 1 boundary (64 bytes) --- */
-	unsigned int               level:8;                              /*    64: 0  4 */
-	unsigned int               key_ready:1;                          /*    64: 8  4 */
+	unsigned int               level:8;                              /*    56: 8  4 */
+	unsigned int               key_ready:1;                          /*    56:16  4 */
 
-	/* size: 72, cachelines: 2, members: 5 */
-	/* sum members: 57, holes: 1, sum holes: 7 */
-	/* sum bitfield members: 9 bits (1 bytes) */
+	/* size: 64, cachelines: 1, members: 5 */
 	/* padding: 4 */
-	/* bit_padding: 23 bits */
+	/* bit_padding: 15 bits */
 	/* forced alignments: 1 */
-	/* last cacheline: 8 bytes */
 } __attribute__((__aligned__(8)));
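For reference, a layout dump like the one above can be regenerated with
pahole from any object built with debug info, e.g. something like (the
path is only an example):

  $ pahole -C tree_block fs/btrfs/relocation.o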