diff --git a/ChangeLog b/ChangeLog
index 192acbf7d5..67d3503afe 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,16 @@
+2017-10-17  Wilco Dijkstra
+
+	* malloc/malloc.c (FASTCHUNKS_BIT): Remove.
+	(have_fastchunks): Remove.
+	(clear_fastchunks): Remove.
+	(set_fastchunks): Remove.
+	(malloc_state): Add have_fastchunks.
+	(malloc_init_state): Use have_fastchunks.
+	(do_check_malloc_state): Remove incorrect invariant checks.
+	(_int_malloc): Use have_fastchunks.
+	(_int_free): Likewise.
+	(malloc_consolidate): Likewise.
+
 2017-10-17  Wilco Dijkstra
 
 	* malloc/malloc.c (tcache_put): Inline.
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 546579bce4..37e3c44e72 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1612,27 +1612,6 @@ typedef struct malloc_chunk *mfastbinptr;
 
 #define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
 
-/*
-   Since the lowest 2 bits in max_fast don't matter in size comparisons,
-   they are used as flags.
- */
-
-/*
-   FASTCHUNKS_BIT held in max_fast indicates that there are probably
-   some fastbin chunks. It is set true on entering a chunk into any
-   fastbin, and cleared only in malloc_consolidate.
-
-   The truth value is inverted so that have_fastchunks will be true
-   upon startup (since statics are zero-filled), simplifying
-   initialization checks.
- */
-
-#define FASTCHUNKS_BIT        (1U)
-
-#define have_fastchunks(M)     (((M)->flags & FASTCHUNKS_BIT) == 0)
-#define clear_fastchunks(M)    catomic_or (&(M)->flags, FASTCHUNKS_BIT)
-#define set_fastchunks(M)      catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
-
 /*
    NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
    regions.  Otherwise, contiguity is exploited in merging together,
@@ -1690,6 +1669,17 @@ get_max_fast (void)
    ----------- Internal state representation and initialization -----------
  */
 
+/*
+   have_fastchunks indicates that there are probably some fastbin chunks.
+   It is set true on entering a chunk into any fastbin, and cleared early in
+   malloc_consolidate.  The value is approximate since it may be set when there
+   are no fastbin chunks, or it may be clear even if there are fastbin chunks
+   available.  Given its sole purpose is to reduce the number of redundant
+   calls to malloc_consolidate, it does not affect correctness.  As a result
+   we can safely use relaxed atomic accesses.
+ */
+
+
 struct malloc_state
 {
   /* Serialize access.  */
@@ -1698,6 +1688,9 @@ struct malloc_state
   /* Flags (formerly in max_fast).  */
   int flags;
 
+  /* Set if the fastbin chunks contain recently inserted free blocks.  */
+  bool have_fastchunks;
+
   /* Fastbins */
   mfastbinptr fastbinsY[NFASTBINS];
 
@@ -1841,7 +1834,7 @@ malloc_init_state (mstate av)
     set_noncontiguous (av);
   if (av == &main_arena)
     set_max_fast (DEFAULT_MXFAST);
-  av->flags |= FASTCHUNKS_BIT;
+  atomic_store_relaxed (&av->have_fastchunks, false);
 
   av->top = initial_top (av);
 }
@@ -2206,11 +2199,6 @@ do_check_malloc_state (mstate av)
         }
     }
 
-  if (total != 0)
-    assert (have_fastchunks (av));
-  else if (!have_fastchunks (av))
-    assert (total == 0);
-
   /* check normal bins */
   for (i = 1; i < NBINS; ++i)
     {
@@ -3701,7 +3689,7 @@ _int_malloc (mstate av, size_t bytes)
   else
     {
       idx = largebin_index (nb);
-      if (have_fastchunks (av))
+      if (atomic_load_relaxed (&av->have_fastchunks))
         malloc_consolidate (av);
     }
 
@@ -4116,7 +4104,7 @@ _int_malloc (mstate av, size_t bytes)
 
       /* When we are using atomic ops to free fast chunks we can get
          here for all block sizes.  */
-      else if (have_fastchunks (av))
+      else if (atomic_load_relaxed (&av->have_fastchunks))
         {
           malloc_consolidate (av);
           /* restore original bin index */
@@ -4242,7 +4230,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
     free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 
-    set_fastchunks(av);
+    atomic_store_relaxed (&av->have_fastchunks, true);
     unsigned int idx = fastbin_index(size);
     fb = &fastbin (av, idx);
 
@@ -4393,7 +4381,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
     */
 
     if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
-      if (have_fastchunks(av))
+      if (atomic_load_relaxed (&av->have_fastchunks))
 	malloc_consolidate(av);
 
       if (av == &main_arena) {
@@ -4464,7 +4452,7 @@ static void malloc_consolidate(mstate av)
   */
 
   if (get_max_fast () != 0) {
-    clear_fastchunks(av);
+    atomic_store_relaxed (&av->have_fastchunks, false);
 
     unsorted_bin = unsorted_chunks(av);
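
Note on the memory-ordering choice: glibc's internal atomic_load_relaxed and
atomic_store_relaxed macros correspond to C11 memory_order_relaxed accesses.
The standalone sketch below is not part of the patch; arena_t,
insert_into_fastbin and maybe_consolidate are hypothetical stand-ins for the
glibc internals.  It illustrates why relaxed ordering is sufficient for a flag
that is only a hint and never publishes other data.

/* Standalone sketch, not glibc code: arena_t and the helper names are
   hypothetical.  Uses ISO C11 atomics where glibc uses its internal
   atomic_store_relaxed/atomic_load_relaxed macros.  */
#include <stdatomic.h>

typedef struct
{
  /* Approximate hint: may be stale in either direction.  */
  atomic_bool have_fastchunks;
  /* ... fastbins, top chunk, locks elided ...  */
} arena_t;

static void
insert_into_fastbin (arena_t *av /*, chunk arguments elided */)
{
  /* Set the hint; nothing else is synchronized through it, so
     relaxed ordering suffices.  */
  atomic_store_explicit (&av->have_fastchunks, true,
                         memory_order_relaxed);
  /* ... link the chunk into its fastbin ...  */
}

static void
maybe_consolidate (arena_t *av)
{
  /* A stale false only postpones consolidation until a later call;
     a stale true only costs one redundant scan of empty fastbins.  */
  if (atomic_load_explicit (&av->have_fastchunks, memory_order_relaxed))
    {
      atomic_store_explicit (&av->have_fastchunks, false,
                             memory_order_relaxed);
      /* ... merge fastbin chunks into the unsorted bin ...  */
    }
}

Because correctness never depends on the flag's value, no acquire/release
pairing is needed, which is exactly the argument the new comment in
malloc/malloc.c makes for using relaxed atomic accesses.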