Use relaxed atomics for malloc have_fastchunks

Currently free typically uses two atomic operations per call.  The
have_fastchunks flag indicates whether there are recently freed blocks in the
fastbins.  It exists purely as an optimization: it avoids calling
malloc_consolidate too often, and avoids the overhead of walking all fast bins
during a sequence of allocations when every bin is empty.  However, using
catomic_or to update the flag is completely unnecessary: the flag can be
changed into a simple boolean and accessed using relaxed atomics.  There is no
change in multi-threaded behaviour, since the flag is already approximate (it
may be set when there are no blocks in any fast bin, or it may be clear when
there are free blocks that could be consolidated).
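
This is the standard approximate "dirty flag" pattern: writers publish a hint
with a relaxed store, and the consumer tests and clears it with relaxed
accesses.  Below is a minimal, self-contained sketch in portable C11; glibc
itself uses its internal atomic_load_relaxed/atomic_store_relaxed wrappers
rather than <stdatomic.h>, and note_fast_free/maybe_consolidate are
illustrative names, not glibc functions:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Approximate hint: it may be stale in either direction, so nothing
     beyond atomicity of the individual load/store is required.  */
  static atomic_bool have_fastchunks;

  /* On every free of a small block: a plain relaxed store, no
     read-modify-write, no barriers.  */
  static void
  note_fast_free (void)
  {
    atomic_store_explicit (&have_fastchunks, true, memory_order_relaxed);
  }

  /* Before a large allocation: consolidate only if the hint says there
     may be work to do.  Clearing first is safe; a concurrent free simply
     sets the flag again and a later call picks it up.  */
  static void
  maybe_consolidate (void)
  {
    if (atomic_load_explicit (&have_fastchunks, memory_order_relaxed))
      {
        atomic_store_explicit (&have_fastchunks, false, memory_order_relaxed);
        puts ("walking the fast bins");  /* stands in for malloc_consolidate */
      }
  }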

Performance of malloc/free improves by 27% on a simple benchmark on AArch64
(both single- and multi-threaded).  The number of load/store-exclusive
instructions is reduced by 33%.  Bench-malloc-thread speeds up by ~3% in all
cases.
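
The reduction in exclusives follows from the shape of the operation:
catomic_or/catomic_and are atomic read-modify-writes, which on AArch64 cores
without LSE atomics compile to a load-exclusive/store-exclusive retry loop,
whereas the relaxed store of the new boolean is a single plain store.  A
hedged before/after sketch (exact code generation depends on the compiler
and -march):

  /* Before: flag packed into av->flags, updated with an atomic RMW;
     an ldxr/stxr loop per free on pre-LSE AArch64.  */
  set_fastchunks (av);   /* catomic_and (&av->flags, ~FASTCHUNKS_BIT) */

  /* After: dedicated field, relaxed store; a single str instruction.  */
  atomic_store_relaxed (&av->have_fastchunks, true);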

	* malloc/malloc.c (FASTCHUNKS_BIT): Remove.
	(have_fastchunks): Remove.
	(clear_fastchunks): Remove.
	(set_fastchunks): Remove.
	(malloc_state): Add have_fastchunks.
	(malloc_init_state): Use have_fastchunks.
	(do_check_malloc_state): Remove incorrect invariant checks.
	(_int_malloc): Use have_fastchunks.
	(_int_free): Likewise.
	(malloc_consolidate): Likewise.

(cherry-picked from e956075a5a)
Siddhesh Poyarekar 2017-11-16 12:21:27 +05:30
parent ade53e0df7
commit aa5be982ea
2 changed files with 33 additions and 32 deletions

ChangeLog

@@ -1,3 +1,16 @@
+2017-10-17  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	* malloc/malloc.c (FASTCHUNKS_BIT): Remove.
+	(have_fastchunks): Remove.
+	(clear_fastchunks): Remove.
+	(set_fastchunks): Remove.
+	(malloc_state): Add have_fastchunks.
+	(malloc_init_state): Use have_fastchunks.
+	(do_check_malloc_state): Remove incorrect invariant checks.
+	(_int_malloc): Use have_fastchunks.
+	(_int_free): Likewise.
+	(malloc_consolidate): Likewise.
+
 2017-10-17  Wilco Dijkstra  <wdijkstr@arm.com>
 
 	* malloc/malloc.c (tcache_put): Inline.

malloc/malloc.c

@@ -1612,27 +1612,6 @@ typedef struct malloc_chunk *mfastbinptr;
 #define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
 
-/*
-   Since the lowest 2 bits in max_fast don't matter in size comparisons,
-   they are used as flags.
- */
-
-/*
-   FASTCHUNKS_BIT held in max_fast indicates that there are probably
-   some fastbin chunks.  It is set true on entering a chunk into any
-   fastbin, and cleared only in malloc_consolidate.
-
-   The truth value is inverted so that have_fastchunks will be true
-   upon startup (since statics are zero-filled), simplifying
-   initialization checks.
- */
-
-#define FASTCHUNKS_BIT        (1U)
-
-#define have_fastchunks(M)     (((M)->flags & FASTCHUNKS_BIT) == 0)
-#define clear_fastchunks(M)    catomic_or (&(M)->flags, FASTCHUNKS_BIT)
-#define set_fastchunks(M)      catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
-
 /*
    NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
    regions.  Otherwise, contiguity is exploited in merging together,
@@ -1690,6 +1669,17 @@ get_max_fast (void)
    ----------- Internal state representation and initialization -----------
  */
 
+/*
+   have_fastchunks indicates that there are probably some fastbin chunks.
+   It is set true on entering a chunk into any fastbin, and cleared early in
+   malloc_consolidate.  The value is approximate since it may be set when there
+   are no fastbin chunks, or it may be clear even if there are fastbin chunks
+   available.  Given its sole purpose is to reduce the number of redundant
+   calls to malloc_consolidate, it does not affect correctness.  As a result
+   we can safely use relaxed atomic accesses.
+ */
+
 struct malloc_state
 {
   /* Serialize access.  */
@@ -1698,6 +1688,9 @@ struct malloc_state
   /* Flags (formerly in max_fast).  */
   int flags;
 
+  /* Set if the fastbin chunks contain recently inserted free blocks.  */
+  bool have_fastchunks;
+
   /* Fastbins */
   mfastbinptr fastbinsY[NFASTBINS];
@@ -1841,7 +1834,7 @@ malloc_init_state (mstate av)
     set_noncontiguous (av);
   if (av == &main_arena)
     set_max_fast (DEFAULT_MXFAST);
-  av->flags |= FASTCHUNKS_BIT;
+  atomic_store_relaxed (&av->have_fastchunks, false);
 
   av->top = initial_top (av);
 }
@@ -2206,11 +2199,6 @@ do_check_malloc_state (mstate av)
         }
     }
 
-  if (total != 0)
-    assert (have_fastchunks (av));
-  else if (!have_fastchunks (av))
-    assert (total == 0);
-
   /* check normal bins */
   for (i = 1; i < NBINS; ++i)
     {
@@ -3701,7 +3689,7 @@ _int_malloc (mstate av, size_t bytes)
   else
     {
       idx = largebin_index (nb);
-      if (have_fastchunks (av))
+      if (atomic_load_relaxed (&av->have_fastchunks))
         malloc_consolidate (av);
     }
@@ -4116,7 +4104,7 @@ _int_malloc (mstate av, size_t bytes)
       /* When we are using atomic ops to free fast chunks we can get
          here for all block sizes.  */
-      else if (have_fastchunks (av))
+      else if (atomic_load_relaxed (&av->have_fastchunks))
         {
           malloc_consolidate (av);
           /* restore original bin index */
@@ -4242,7 +4230,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
       free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 
-    set_fastchunks(av);
+    atomic_store_relaxed (&av->have_fastchunks, true);
     unsigned int idx = fastbin_index(size);
     fb = &fastbin (av, idx);
@@ -4393,7 +4381,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
     */
 
     if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
-      if (have_fastchunks(av))
+      if (atomic_load_relaxed (&av->have_fastchunks))
 	malloc_consolidate(av);
 
       if (av == &main_arena) {
@@ -4464,7 +4452,7 @@ static void malloc_consolidate(mstate av)
   */
 
   if (get_max_fast () != 0) {
-    clear_fastchunks(av);
+    atomic_store_relaxed (&av->have_fastchunks, false);
 
     unsorted_bin = unsorted_chunks(av);