glibc/nptl/pthread_mutex_timedlock.c
commit df47504c78
2006-07-28  Ulrich Drepper  <drepper@redhat.com>
	    Jakub Jelinek  <jakub@redhat.com>

	* descr.h: Change ENQUEUE_MUTEX and DEQUEUE_MUTEX for bit 0
	notification of PI mutex.  Add ENQUEUE_MUTEX_PI.
	* pthreadP.h: Define PTHREAD_MUTEX_PI_* macros for PI mutex types.
	* pthread_mutex_setprioceiling.c: Adjust for mutex type name change.
	* pthread_mutex_init.c: Add support for priority inheritance mutex.
	* pthread_mutex_lock.c: Likewise.
	* pthread_mutex_timedlock.c: Likewise.
	* pthread_mutex_trylock.c: Likewise.
	* pthread_mutex_unlock.c: Likewise.
	* sysdeps/pthread/pthread_cond_broadcast.c: For PI mutexes wake
	all threads.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.c: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.c: Likewise.
	* sysdeps/unix/sysv/linux/pthread-pi-defines.sym: New file.
	* sysdeps/unix/sysv/linux/Makefile (gen-as-const-headers): Add
	pthread-pi-defines.sym.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Define FUTEX_LOCK_PI,
	FUTEX_UNLOCK_PI, and FUTEX_TRYLOCK_PI.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
	_POSIX_THREAD_PRIO_INHERIT to 200112L.
	* tst-mutex1.c: Adjust to allow use in PI mutex test.
	* tst-mutex2.c: Likewise.
	* tst-mutex3.c: Likewise.
	* tst-mutex4.c: Likewise.
	* tst-mutex5.c: Likewise.
	* tst-mutex6.c: Likewise.
	* tst-mutex7.c: Likewise.
	* tst-mutex7a.c: Likewise.
	* tst-mutex8.c: Likewise.
	* tst-mutex9.c: Likewise.
	* tst-robust1.c: Likewise.
	* tst-robust7.c: Likewise.
	* tst-robust8.c: Likewise.
	* tst-mutexpi1.c: New file.
	* tst-mutexpi2.c: New file.
	* tst-mutexpi3.c: New file.
	* tst-mutexpi4.c: New file.
	* tst-mutexpi5.c: New file.
	* tst-mutexpi6.c: New file.
	* tst-mutexpi7.c: New file.
	* tst-mutexpi7a.c: New file.
	* tst-mutexpi8.c: New file.
	* tst-mutexpi9.c: New file.
	* tst-robustpi1.c: New file.
	* tst-robustpi2.c: New file.
	* tst-robustpi3.c: New file.
	* tst-robustpi4.c: New file.
	* tst-robustpi5.c: New file.
	* tst-robustpi6.c: New file.
	* tst-robustpi7.c: New file.
	* tst-robustpi8.c: New file.
	* Makefile (tests): Add the new tests.

	* pthread_create.c (start_thread): Add some casts to avoid warnings.
	* pthread_mutex_destroy.c: Remove unneeded label.
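
As a usage illustration of the feature this commit introduces (not part
of the commit itself): a priority-inheritance mutex is requested through
the standard attribute protocol and then used with
pthread_mutex_timedlock like any other mutex.  A minimal sketch, error
handling mostly elided; lock_with_deadline is just an illustrative
helper name.

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <time.h>

	static int
	lock_with_deadline (pthread_mutex_t *m)
	{
	  /* pthread_mutex_timedlock takes an absolute CLOCK_REALTIME
	     deadline; build one a second from now.  */
	  struct timespec abstime;
	  clock_gettime (CLOCK_REALTIME, &abstime);
	  abstime.tv_sec += 1;
	  return pthread_mutex_timedlock (m, &abstime);
	}

	int
	main (void)
	{
	  pthread_mutexattr_t attr;
	  pthread_mutex_t m;

	  pthread_mutexattr_init (&attr);
	  /* Request priority inheritance; reported with ENOTSUP where
	     _POSIX_THREAD_PRIO_INHERIT is unavailable.  */
	  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
	  pthread_mutex_init (&m, &attr);
	  pthread_mutexattr_destroy (&attr);

	  int err = lock_with_deadline (&m);
	  if (err == 0)
	    pthread_mutex_unlock (&m);
	  else if (err == ETIMEDOUT)
	    puts ("lock attempt timed out");

	  pthread_mutex_destroy (&m);
	  return 0;
	}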

/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include "pthreadP.h"
#include <lowlevellock.h>


int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  goto out;
	}

      /* We have to get the mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);

      if (result != 0)
	goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
	return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
	goto simple;

      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
		  break;
		}

#ifdef BUSY_WAIT_NOP
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (lll_mutex_trylock (mutex->__data.__lock) != 0);

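	  /* Update the stored spin count as an exponential moving
	     average: each sample contributes 1/8 of its difference
	     from the running value.  */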
	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
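      /* Record the mutex in list_op_pending first: if this thread dies
	 between acquiring the lock and the ENQUEUE_MUTEX below, the
	 kernel's robust-list walk still finds the mutex and flags it
	 with FUTEX_OWNER_DIED for the next locker.  */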
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       id, oldval);
	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  */
	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      if (mutex->__data.__kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (mutex->__data.__kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
					       id);

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_mutex_unlock (mutex->__data.__lock);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }

	  if (result == ETIMEDOUT || result == EINVAL)
	    goto out;

	  oldval = result;
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }
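
	/* Fast path: try to take an uncontended lock entirely in user
	   space; only on contention does the kernel get involved.  */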
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      id, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  FUTEX_LOCK_PI expects an absolute
	       CLOCK_REALTIME timeout, so ABSTIME can be passed to the
	       kernel unchanged.  */
	    INTERNAL_SYSCALL_DECL (__err);

	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      FUTEX_LOCK_PI, 1, abstime);
	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
	      {
		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
		  return ETIMEDOUT;

		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
		  {
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
		    /* ESRCH can happen only for non-robust PI mutexes where
		       the owner of the lock died.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
			    || !robust);

		    /* Delay the thread until the timeout is reached.
		       Then return ETIMEDOUT.  */
		    struct timespec reltime;
		    struct timespec now;

		    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
				      &now);
		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		    if (reltime.tv_nsec < 0)
		      {
			reltime.tv_nsec += 1000000000;
			--reltime.tv_sec;
		      }
		    if (reltime.tv_sec >= 0)
		      while (__nanosleep_nocancel (&reltime, &reltime) != 0)
			continue;

		    return ETIMEDOUT;
		  }

		return INTERNAL_SYSCALL_ERRNO (e, __err);
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  */
	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      FUTEX_UNLOCK_PI, 0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
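
Caller-side note on the robust paths above (illustrative, not part of
the file): EOWNERDEAD means the lock was acquired but its previous owner
died holding it, so the protected state may be inconsistent.  A minimal
recovery sketch; repair_shared_state is a hypothetical application hook,
and pthread_mutex_consistent_np is the glibc name of the consistency
operation at this point in history (POSIX.1-2008 later standardized it
as pthread_mutex_consistent).

	#include <errno.h>
	#include <pthread.h>
	#include <time.h>

	/* Hypothetical application hook: restore the invariants of the
	   data protected by the mutex after its previous owner died.  */
	extern void repair_shared_state (void);

	int
	lock_robust (pthread_mutex_t *m, const struct timespec *abstime)
	{
	  int err = pthread_mutex_timedlock (m, abstime);
	  if (err == EOWNERDEAD)
	    {
	      /* We own the lock, but the state it protects is suspect.  */
	      repair_shared_state ();
	      /* Mark the mutex usable again; if the owner unlocks without
		 doing this, later lockers get ENOTRECOVERABLE.  */
	      pthread_mutex_consistent_np (m);
	      err = 0;
	    }
	  return err;  /* 0 on success, else ETIMEDOUT or another error.  */
	}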