Add single-threaded path to _int_free

This patch adds single-threaded fast paths to _int_free.
It also bypasses the explicit arena locking for larger (non-fastbin) allocations.

	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.

(cherry picked from commit a15d53e2de)
Author:     Wilco Dijkstra <wdijkstr@arm.com>
Date:       2017-10-20 17:27:53 +01:00
Committer:  Siddhesh Poyarekar
Parent:     0e24837040
Commit:     e759c32364
2 changed files with 32 additions and 13 deletions
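
Before the file diffs, a minimal standalone sketch of the pattern the
commit message describes: take the atomic compare-and-exchange path only
when another thread could interfere, and use plain stores otherwise.  It
is written against C11 atomics with illustrative names (single_thread_p,
freelist_push) rather than glibc internals.

#include <stdatomic.h>
#include <stdbool.h>

struct node { struct node *next; };

/* In glibc, SINGLE_THREAD_P is a cheap runtime check; a plain flag
   stands in for it here.  */
static bool single_thread_p = true;

static void
freelist_push (struct node *_Atomic *head, struct node *n)
{
  struct node *old = atomic_load_explicit (head, memory_order_relaxed);

  if (single_thread_p)
    {
      /* No concurrent access is possible: ordinary stores suffice and
         no atomic read-modify-write is needed.  */
      n->next = old;
      atomic_store_explicit (head, n, memory_order_relaxed);
    }
  else
    /* Contended path: retry until the head has not changed under us,
       publishing N with release semantics, much as
       catomic_compare_and_exchange_val_rel does in the patch.  */
    do
      n->next = old;
    while (!atomic_compare_exchange_weak_explicit (head, &old, n,
                                                   memory_order_release,
                                                   memory_order_relaxed));
}

int main (void)
{
  static struct node *_Atomic head;
  static struct node n1, n2;
  freelist_push (&head, &n1);     /* single-threaded fast path */
  single_thread_p = false;
  freelist_push (&head, &n2);     /* CAS path */
  return 0;
}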

ChangeLog:

@@ -1,3 +1,7 @@
+2017-10-20  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.
+
 2017-10-19  Wilco Dijkstra  <wdijkstr@arm.com>
 
 	* malloc/malloc.c (_int_free): Fix deadlock bug in consistency check.

malloc/malloc.c:

@@ -4172,24 +4172,34 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
     /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
     mchunkptr old = *fb, old2;
-    unsigned int old_idx = ~0u;
-    do
+
+    if (SINGLE_THREAD_P)
       {
-        /* Check that the top of the bin is not the record we are going to add
-           (i.e., double free).  */
+        /* Check that the top of the bin is not the record we are going to
+           add (i.e., double free).  */
         if (__builtin_expect (old == p, 0))
           malloc_printerr ("double free or corruption (fasttop)");
-        /* Check that size of fastbin chunk at the top is the same as
-           size of the chunk that we are adding.  We can dereference OLD
-           only if we have the lock, otherwise it might have already been
-           deallocated.  See use of OLD_IDX below for the actual check.  */
-        if (have_lock && old != NULL)
-          old_idx = fastbin_index(chunksize(old));
-        p->fd = old2 = old;
+        p->fd = old;
+        *fb = p;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
+    else
+      do
+        {
+          /* Check that the top of the bin is not the record we are going to
+             add (i.e., double free).  */
+          if (__builtin_expect (old == p, 0))
+            malloc_printerr ("double free or corruption (fasttop)");
+          p->fd = old2 = old;
+        }
+      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
+             != old2);
 
-    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
+    /* Check that size of fastbin chunk at the top is the same as
+       size of the chunk that we are adding.  We can dereference OLD
+       only if we have the lock, otherwise it might have already been
+       allocated again.  */
+    if (have_lock && old != NULL
+        && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
       malloc_printerr ("invalid fastbin entry (free)");
   }
 
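The two checks in this hunk can be exercised with a toy model.  The chunk
layout and the names toy_index and toy_fastbin_free below are simplified
stand-ins, not glibc's real structures or macros, and the have_lock
condition on the size check is dropped because the toy is single-threaded.

#include <stdio.h>
#include <stdlib.h>

struct chunk { size_t size; struct chunk *fd; };

static struct chunk *fastbin_top;   /* one toy bin */

static unsigned int toy_index (size_t size) { return size >> 4; }

static void
toy_fastbin_free (struct chunk *p)
{
  struct chunk *old = fastbin_top;

  /* Fasttop check: freeing the chunk already on top of the bin is a
     double free.  Note it only catches back-to-back double frees.  */
  if (old == p)
    {
      fputs ("double free or corruption (fasttop)\n", stderr);
      abort ();
    }

  /* Size check: the current top must be in the same size class as the
     chunk being added.  */
  if (old != NULL && toy_index (old->size) != toy_index (p->size))
    {
      fputs ("invalid fastbin entry (free)\n", stderr);
      abort ();
    }

  p->fd = old;
  fastbin_top = p;      /* single-threaded path: a plain store */
}

int main (void)
{
  static struct chunk a = { 32, NULL }, b = { 32, NULL };
  toy_fastbin_free (&a);
  toy_fastbin_free (&b);
  toy_fastbin_free (&a);   /* allowed: A is no longer the bin top */
  toy_fastbin_free (&a);   /* caught: A is now the top, so this aborts */
  return 0;
}
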
@@ -4198,6 +4208,11 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   */
 
   else if (!chunk_is_mmapped(p)) {
+
+    /* If we're single-threaded, don't lock the arena.  */
+    if (SINGLE_THREAD_P)
+      have_lock = true;
+
     if (!have_lock)
       __libc_lock_lock (av->mutex);
 
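
This hunk's trick is that forcing have_lock to true makes both this lock
acquisition and the matching "if (!have_lock)" unlock at the end of
_int_free fall away.  A rough sketch of the same elision, with a generic
pthread mutex standing in for glibc's __libc_lock_* internals:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t arena_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool single_thread_p;    /* stand-in for SINGLE_THREAD_P */

static void
consolidate_chunk (bool have_lock)
{
  /* If we're single-threaded, claim to hold the lock so that both the
     lock below and the symmetric unlock are skipped.  */
  if (single_thread_p)
    have_lock = true;

  if (!have_lock)
    pthread_mutex_lock (&arena_mutex);

  /* ... coalesce the freed chunk with its neighbours here ... */

  if (!have_lock)
    pthread_mutex_unlock (&arena_mutex);
}

int main (void)
{
  single_thread_p = true;
  consolidate_chunk (false);    /* no locking performed */
  return 0;
}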