Remove hppa linuxthreads support.

We now require NPTL and TLS to build glibc; therefore,
the hppa linuxthreads support is no longer needed.
Debian has already transitioned to NPTL support, and
we will continue to work out NPTL issues.

Signed-off-by: Carlos O'Donell <carlos@systemhalted.org>
This commit is contained in:
Carlos O'Donell 2011-10-20 14:49:52 -04:00
parent 9dd87de7ff
commit 25f991b858
10 changed files with 9 additions and 1036 deletions

View File

@ -1,3 +1,12 @@
2011-10-20 Carlos O'Donell <carlos@systemhalted.org>
* sysdeps/unix/sysv/linux/hppa/linuxthreads/aio_cancel.c: Remove.
* sysdeps/unix/sysv/linux/hppa/linuxthreads/bits/initspin.h: Remove.
* sysdeps/unix/sysv/linux/hppa/linuxthreads/bits/pthreadtypes.h: Remove.
* sysdeps/unix/sysv/linux/hppa/linuxthreads/malloc-machine.h: Remove.
* sysdeps/unix/sysv/linux/hppa/linuxthreads/pt-initfini.c: Remove.
* sysdeps/unix/sysv/linux/hppa/linuxthreads/sysdep-cancel.h: Remove.
2011-10-20 Carlos O'Donell <carlos@systemhalted.org>
* sysdeps/hppa/stackinfo.h: Update copyright year.

View File

@ -1,82 +0,0 @@
/* POSIX spinlock implementation. hppa version.
Copyright (C) 2000 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <pthread.h>
#include "internals.h"
/* Acquire LOCK, spinning until it is free.  PA-RISC's only atomic
   read-modify-write primitive is load-and-clear (ldcw), so the lock
   convention is inverted: zero means "held", non-zero means "free".
   __ldcw_align selects the 16-byte-aligned semaphore word inside
   *LOCK that ldcw requires.  Always returns 0.  */
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
volatile unsigned int *addr = __ldcw_align (lock);
/* If the atomic load-and-clear read zero, the lock was already held;
   busy-wait with plain (cheap) reads until it looks free, then retry
   the atomic operation.  */
while (__ldcw (addr) == 0)
while (*addr == 0) ;
return 0;
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
/* Try to acquire LOCK without blocking.  A non-zero ldcw result means
   the lock was free and we now hold it (return 0); zero means another
   thread holds it, so fail immediately with EBUSY.  */
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
volatile unsigned int *a = __ldcw_align (lock);
return __ldcw (a) ? 0 : EBUSY;
}
weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
/* Release LOCK by storing a non-zero value back into the aligned
   semaphore word (non-zero == free in the inverted ldcw convention).
   Always returns 0.  */
int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
volatile unsigned int *a = __ldcw_align (lock);
int tmp = 1;
/* This should be a memory barrier to newer compilers */
/* The "memory" clobber stops the compiler from moving memory accesses
   past this release store.  NOTE(review): whether the ",ma" completer
   also orders the store against other CPUs is a hardware question --
   confirm against the PA-RISC ordering rules.  */
__asm__ __volatile__ ("stw,ma %1,0(%0)"
: : "r" (a), "r" (tmp) : "memory");
return 0;
}
weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
/* Initialize LOCK to the unlocked state by storing a non-zero value
   into the aligned semaphore word.  PSHARED is accepted for the POSIX
   interface but has no effect here (see comment below).  Always
   returns 0.  */
int
__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
/* We can ignore the `pshared' parameter. Since we are busy-waiting
all processes which can access the memory location `lock' points
to can use the spinlock. */
volatile unsigned int *a = __ldcw_align (lock);
int tmp = 1;
/* This should be a memory barrier to newer compilers */
__asm__ __volatile__ ("stw,ma %1,0(%0)"
: : "r" (a), "r" (tmp) : "memory");
return 0;
}
weak_alias (__pthread_spin_init, pthread_spin_init)
/* Destroy LOCK.  A PA-RISC spinlock owns no external resources, so
   there is nothing to release; destruction trivially succeeds.  */
int
__pthread_spin_destroy (pthread_spinlock_t *lock)
{
(void) lock;
return 0;
}
weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

View File

@ -1,134 +0,0 @@
/* Machine-dependent pthreads configuration and inline functions.
hppa version.
Copyright (C) 2000, 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Richard Henderson <rth@tamu.edu>.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#ifndef _PT_MACHINE_H
#define _PT_MACHINE_H 1
#include <sys/types.h>
#include <bits/initspin.h>
#ifndef PT_EI
# define PT_EI extern inline __attribute__ ((always_inline))
#endif
/* Forward declarations of the atomic primitives defined later in this
   header (and by the generic linuxthreads code).  */
extern inline long int testandset (__atomic_lock_t *spinlock);
extern inline int __compare_and_swap (long int *p, long int oldval, long int newval);
extern inline int lock_held (__atomic_lock_t *spinlock);
extern inline int __load_and_clear (__atomic_lock_t *spinlock);
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
#define CURRENT_STACK_FRAME stack_pointer
/* %r30 is the PA-RISC stack pointer; bind a global register variable
   to it so CURRENT_STACK_FRAME is just a register read.  */
register char * stack_pointer __asm__ ("%r30");
/* Get/Set thread-specific pointer. We have to call into the kernel to
* modify it, but we can read it in user mode. */
#ifndef THREAD_SELF
#define THREAD_SELF __get_cr27()
#endif
#ifndef SET_THREAD_SELF
#define SET_THREAD_SELF(descr) __set_cr27(descr)
#endif
/* Use this to determine type */
struct _pthread_descr_struct *__thread_self;
/* Read the thread descriptor pointer out of control register cr27,
   which is readable from user mode.  */
static inline struct _pthread_descr_struct * __get_cr27(void)
{
long cr27;
asm ("mfctl %%cr27, %0" : "=r" (cr27) : );
return (struct _pthread_descr_struct *) cr27;
}
#ifndef INIT_THREAD_SELF
#define INIT_THREAD_SELF(descr, nr) __set_cr27(descr)
#endif
/* Write cr27: branches into the kernel gateway page (via %sr2) with
   the new value in %r26.  NOTE(review): offset 0xe0 is presumably the
   hppa kernel's set-thread-pointer helper -- confirm against the
   kernel's gateway page layout.  */
static inline void __set_cr27(struct _pthread_descr_struct * cr27)
{
asm ( "ble 0xe0(%%sr2, %%r0)\n\t"
"copy %0, %%r26"
: : "r" (cr27) : "r26" );
}
/* We want the OS to assign stack addresses. */
#define FLOATING_STACKS 1
#define ARCH_STACK_MAX_SIZE 8*1024*1024
/* The hppa only has one atomic read and modify memory operation,
load and clear, so hppa spinlocks must use zero to signify that
someone is holding the lock. The address used for the ldcw
semaphore must be 16-byte aligned. */
/* Atomic load-and-clear: returns the previous value of *a and leaves
   zero behind.  */
#define __ldcw(a) \
({ \
unsigned int __ret; \
__asm__ __volatile__("ldcw 0(%1),%0" \
: "=r" (__ret) : "r" (a) : "memory"); \
__ret; \
})
/* Strongly ordered lock reset */
#define __lock_reset(lock_addr, tmp) \
({ \
__asm__ __volatile__ ("stw,ma %1,0(%0)" \
: : "r" (lock_addr), "r" (tmp) : "memory"); \
})
/* Because malloc only guarantees 8-byte alignment for malloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
specify "__attribute ((aligned(16)))" in the type declaration. So,
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
for the semaphore. */
#define __PA_LDCW_ALIGNMENT 16
/* Return the address of the 16-byte-aligned word inside the four-word
   lock structure.  NOTE(review): the address is round-tripped through
   an unsigned int, which assumes 32-bit pointers (ILP32 hppa).  */
#define __ldcw_align(a) ({ \
volatile unsigned int __ret = (unsigned int) a; \
if ((__ret & ~(__PA_LDCW_ALIGNMENT - 1)) < (unsigned int) a) \
__ret = (__ret & ~(__PA_LDCW_ALIGNMENT - 1)) + __PA_LDCW_ALIGNMENT; \
(unsigned int *) __ret; \
})
/* Spinlock implementation; required. */
/* Atomically read the semaphore word and clear it; the returned value
   is non-zero iff the lock was free and is now held by the caller.  */
PT_EI int
__load_and_clear (__atomic_lock_t *spinlock)
{
volatile unsigned int *a = __ldcw_align (spinlock);
return __ldcw (a);
}
/* Emulate testandset */
/* Returns non-zero ("was set") when the lock was already held, i.e.
   when load-and-clear read zero -- note the inverted convention.  */
PT_EI long int
testandset (__atomic_lock_t *spinlock)
{
return (__load_and_clear(spinlock) == 0);
}
/* Non-atomic query: the lock is held when the semaphore word is zero.  */
PT_EI int
lock_held (__atomic_lock_t *spinlock)
{
volatile unsigned int *a = __ldcw_align (spinlock);
return *a == 0;
}
#endif /* pt-machine.h */

View File

@ -1,163 +0,0 @@
/* Definition for thread-local data handling. linuxthreads/hppa version.
Copyright (C) 2005 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#ifndef _TLS_H
#define _TLS_H
#ifndef __ASSEMBLER__
# include <pt-machine.h>
# include <stdbool.h>
# include <stddef.h>
/* Type for the dtv. */
typedef union dtv
{
size_t counter;
struct
{
void *val;
bool is_static;
} pointer;
} dtv_t;
#else /* __ASSEMBLER__ */
# include <tcb-offsets.h>
#endif /* __ASSEMBLER__ */
#if defined HAVE_TLS_SUPPORT
/* Signal that TLS support is available. */
# define USE_TLS 1
# ifndef __ASSEMBLER__
/* The thread control block; cr27 points just past the descriptor, at
   this structure (see THREAD_SELF below).  */
typedef struct
{
dtv_t *dtv;
void *private;
} tcbhead_t;
/* Include some syscall information for other headers */
# include <sysdep.h>
/* This is the size of the initial TCB. */
# define TLS_INIT_TCB_SIZE sizeof (tcbhead_t)
/* Alignment requirements for the initial TCB. */
# define TLS_INIT_TCB_ALIGN __alignof__ (tcbhead_t)
/* This is the size of the TCB. */
# define TLS_TCB_SIZE sizeof (tcbhead_t)
/* This is the size we need before TCB. */
# define TLS_PRE_TCB_SIZE sizeof (struct _pthread_descr_struct)
/* Alignment requirements for the TCB. */
# define TLS_TCB_ALIGN __alignof__ (struct _pthread_descr_struct)
/* The TLS blocks start right after the TCB. */
# define TLS_DTV_AT_TP 1
/* Return the thread descriptor for the current thread.  cr27 holds a
   pointer one descriptor past the start, so step back by one.  */
# undef THREAD_SELF
# define THREAD_SELF \
({ struct _pthread_descr_struct *__self; \
__self = __get_cr27(); \
__self - 1; \
})
# undef INIT_THREAD_SELF
# define INIT_THREAD_SELF(descr, nr) \
({ struct _pthread_descr_struct *__self = (void *)descr; \
__set_cr27(__self + 1); \
0; \
})
/* Access to data in the thread descriptor is easy. */
#define THREAD_GETMEM(descr, member) \
((void) sizeof (descr), THREAD_SELF->member)
#define THREAD_GETMEM_NC(descr, member) \
((void) sizeof (descr), THREAD_SELF->member)
#define THREAD_SETMEM(descr, member, value) \
((void) sizeof (descr), THREAD_SELF->member = (value))
#define THREAD_SETMEM_NC(descr, member, value) \
((void) sizeof (descr), THREAD_SELF->member = (value))
/* Install the dtv pointer. The pointer passed is to the element with
index -1 which contains the length. */
# define INSTALL_DTV(tcbp, dtvp) \
((tcbhead_t *) (tcbp))->dtv = dtvp + 1
/* Install new dtv for current thread. */
# define INSTALL_NEW_DTV(dtv) \
({ tcbhead_t *__tcbp = (tcbhead_t *)__get_cr27(); \
__tcbp->dtv = dtv; \
})
/* Return dtv of given thread descriptor. */
# define GET_DTV(tcbp) \
(((tcbhead_t *) (tcbp))->dtv)
/* Code to initially initialize the thread pointer. This might need
special attention since 'errno' is not yet available and if the
operation can cause a failure 'errno' must not be touched. */
# define TLS_INIT_TP(tcbp, secondcall) \
({ __set_cr27(tcbp); 0; })
/* Return the address of the dtv for the current thread. */
# define THREAD_DTV() \
({ tcbhead_t *__tcbp = (tcbhead_t *)__get_cr27(); \
__tcbp->dtv; \
})
/* The multiple_threads flag lives in the descriptor, not a separate
   variable.  */
# define TLS_MULTIPLE_THREADS_IN_TCB 1
/* Get the thread descriptor definition. This must be after
the definition of THREAD_SELF for TLS. */
# include <linuxthreads/descr.h>
# endif /* __ASSEMBLER__ */
#else
# ifndef __ASSEMBLER__
/* Fallback TCB layout when TLS support is not available.  */
typedef struct
{
void *tcb;
dtv_t *dtv;
void *self;
int multiple_threads;
} tcbhead_t;
/* Get the thread descriptor definition. */
# include <linuxthreads/descr.h>
# define NONTLS_INIT_TP \
do { \
static const tcbhead_t nontls_init_tp = { .multiple_threads = 0 }; \
INIT_THREAD_SELF(&nontls_init_tp, 0); \
} while (0)
# endif /* __ASSEMBLER__ */
#endif /* HAVE_TLS_SUPPORT */
#endif /* tls.h */

View File

@ -1,33 +0,0 @@
/* Build two versioned copies of aio_cancel/aio_cancel64 for librt by
   compiling the generic implementation twice.  */
#include <shlib-compat.h>
/* Hide the header's aio_cancel64 declaration while <aio.h> is read so
   it does not clash with the aliases defined below.  */
#define aio_cancel64 XXX
#include <aio.h>
#undef aio_cancel64
#include <errno.h>
extern __typeof (aio_cancel) __new_aio_cancel;
extern __typeof (aio_cancel) __old_aio_cancel;
/* First compilation: the current ABI, exported at GLIBC_2.3 with the
   standard ECANCELED value.  */
#define aio_cancel __new_aio_cancel
#include <sysdeps/pthread/aio_cancel.c>
#undef aio_cancel
strong_alias (__new_aio_cancel, __new_aio_cancel64);
versioned_symbol (librt, __new_aio_cancel, aio_cancel, GLIBC_2_3);
versioned_symbol (librt, __new_aio_cancel64, aio_cancel64, GLIBC_2_3);
#if SHLIB_COMPAT (librt, GLIBC_2_1, GLIBC_2_3)
/* Second compilation: compatibility ABI for pre-2.3 binaries, built
   with ECANCELED forced to 125.  NOTE(review): 125 is presumably the
   historical hppa ECANCELED value -- confirm against the old ABI.  */
#undef ECANCELED
#define aio_cancel __old_aio_cancel
#define ECANCELED 125
#include <sysdeps/pthread/aio_cancel.c>
#undef aio_cancel
strong_alias (__old_aio_cancel, __old_aio_cancel64);
compat_symbol (librt, __old_aio_cancel, aio_cancel, GLIBC_2_1);
compat_symbol (librt, __old_aio_cancel64, aio_cancel64, GLIBC_2_1);
#endif

View File

@ -1,41 +0,0 @@
/* PA-RISC specific definitions for spinlock initializers.
Copyright (C) 2000, 2001 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* Initial value of a spinlock. PA-RISC only implements atomic load
and clear so this must be non-zero. */
/* All four words are set so that whichever word __ldcw_align selects
   as the semaphore starts out in the unlocked (non-zero) state.  */
#define __LT_SPINLOCK_INIT ((__atomic_lock_t) { { 1, 1, 1, 1 } })
/* Initialize global spinlocks without cast, generally macro wrapped */
#define __LT_SPINLOCK_ALT_INIT { { 1, 1, 1, 1 } }
/* Macros for lock initializers, not using the above definition.
The above definition is not used in the case that static initializers
use this value. */
#define __LOCK_ALT_INITIALIZER { __LT_SPINLOCK_ALT_INIT, 0 }
/* Used to initialize _pthread_fastlock's in non-static case */
#define __LOCK_INITIALIZER ((struct _pthread_fastlock){ __LT_SPINLOCK_INIT, 0 })
/* Used in pthread_atomic initialization */
#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_ALT_INIT }
/* Tell the rest of the code that the initializer is non-zero without
explaining its internal structure */
#define __LT_INITIALIZER_NOT_ZERO

View File

@ -1,159 +0,0 @@
/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
/* */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */
#if !defined _BITS_TYPES_H && !defined _PTHREAD_H
# error "Never include <bits/pthreadtypes.h> directly; use <sys/types.h> instead."
#endif
#ifndef _BITS_PTHREADTYPES_H
#define _BITS_PTHREADTYPES_H 1
#define __need_schedparam
#include <bits/sched.h>
/* We need 128-bit alignment for the ldcw semaphore. At most, we are
assured of 64-bit alignment for stack locals and malloc'd data. Thus,
we use a struct with four ints for the atomic lock type. The locking
code will figure out which of the four to use for the ldcw semaphore. */
typedef volatile struct {
int lock[4];
} __attribute__ ((aligned(16))) __atomic_lock_t;
/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
struct _pthread_fastlock
{
__atomic_lock_t __spinlock; /* Used by compare_and_swap emulation. Also,
adaptive SMP lock stores spin count here. */
long int __status; /* "Free" or "taken" or head of waiting list */
};
#ifndef _PTHREAD_DESCR_DEFINED
/* Thread descriptors */
typedef struct _pthread_descr_struct *_pthread_descr;
# define _PTHREAD_DESCR_DEFINED
#endif
/* Attributes for threads. */
typedef struct __pthread_attr_s
{
int __detachstate;
int __schedpolicy;
struct __sched_param __schedparam;
int __inheritsched;
int __scope;
size_t __guardsize;
int __stackaddr_set;
void *__stackaddr;
size_t __stacksize;
} pthread_attr_t;
/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER). */
#ifdef __GLIBC_HAVE_LONG_LONG
__extension__ typedef long long __pthread_cond_align_t;
#else
typedef long __pthread_cond_align_t;
#endif
/* NOTE: layout is ABI; __padding keeps the structure at 48 bytes plus
   the alignment member regardless of the fastlock's size.  */
typedef struct
{
struct _pthread_fastlock __c_lock; /* Protect against concurrent access */
_pthread_descr __c_waiting; /* Threads waiting on this condition */
char __padding[48 - sizeof (struct _pthread_fastlock)
- sizeof (_pthread_descr) - sizeof (__pthread_cond_align_t)];
__pthread_cond_align_t __align;
} pthread_cond_t;
/* Attribute for condition variables. */
typedef struct
{
int __dummy;
} pthread_condattr_t;
/* Keys for thread-specific data */
typedef unsigned int pthread_key_t;
/* Mutexes (not abstract because of PTHREAD_MUTEX_INITIALIZER). */
/* (The layout is unnatural to maintain binary compatibility
with earlier releases of LinuxThreads.) */
typedef struct
{
int __m_reserved; /* Reserved for future use */
int __m_count; /* Depth of recursive locking */
_pthread_descr __m_owner; /* Owner thread (if recursive or errcheck) */
int __m_kind; /* Mutex kind: fast, recursive or errcheck */
struct _pthread_fastlock __m_lock; /* Underlying fast lock */
} pthread_mutex_t;
/* Attribute for mutex. */
typedef struct
{
int __mutexkind;
} pthread_mutexattr_t;
/* Once-only execution */
typedef int pthread_once_t;
#if defined __USE_UNIX98 || defined __USE_XOPEN2K
/* Read-write locks. */
typedef struct _pthread_rwlock_t
{
struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
int __rw_readers; /* Number of readers */
_pthread_descr __rw_writer; /* Identity of writer, or NULL if none */
_pthread_descr __rw_read_waiting; /* Threads waiting for reading */
_pthread_descr __rw_write_waiting; /* Threads waiting for writing */
int __rw_kind; /* Reader/Writer preference selection */
int __rw_pshared; /* Shared between processes or not */
} pthread_rwlock_t;
/* Attribute for read-write locks. */
typedef struct
{
int __lockkind;
int __pshared;
} pthread_rwlockattr_t;
#endif
#ifdef __USE_XOPEN2K
/* POSIX spinlock data type. */
typedef __atomic_lock_t pthread_spinlock_t;
/* POSIX barrier. */
typedef struct {
struct _pthread_fastlock __ba_lock; /* Lock to guarantee mutual exclusion */
int __ba_required; /* Threads needed for completion */
int __ba_present; /* Threads waiting */
_pthread_descr __ba_waiting; /* Queue of waiting threads */
} pthread_barrier_t;
/* barrier attribute */
typedef struct {
int __pshared;
} pthread_barrierattr_t;
#endif
/* Thread identifiers */
typedef unsigned long int pthread_t;
#endif /* bits/pthreadtypes.h */

View File

@ -1,73 +0,0 @@
/* HP-PARISC macro definitions for mutexes, thread-specific data
and parameters for malloc.
Copyright (C) 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Carlos O'Donell <carlos@baldric.uwo.ca>, 2003.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#ifndef _MALLOC_MACHINE_H
#define _MALLOC_MACHINE_H
#undef thread_atfork_static
#include <atomic.h>
#include <bits/libc-lock.h>
__libc_lock_define (typedef, mutex_t)
/* Since our lock structure does not tolerate being initialized to zero, we must
modify the standard function calls made by malloc */
/* Each macro tries the real pthread function first (when libpthread is
   loaded) and otherwise falls back to direct spinlock manipulation of
   the fastlock embedded in the mutex.  */
# define mutex_init(m) \
__libc_maybe_call (__pthread_mutex_init, (m, NULL), \
(((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT),(*(int *)(m))) )
# define mutex_lock(m) \
__libc_maybe_call (__pthread_mutex_lock, (m), \
(__load_and_clear(&((m)->__m_lock.__spinlock)), 0))
# define mutex_trylock(m) \
__libc_maybe_call (__pthread_mutex_trylock, (m), \
(*(int *)(m) ? 1 : (__load_and_clear(&((m)->__m_lock.__spinlock)), 0)))
# define mutex_unlock(m) \
__libc_maybe_call (__pthread_mutex_unlock, (m), \
(((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT), (*(int *)(m))) )
/* This is defined by newer gcc version unique for each module. */
extern void *__dso_handle __attribute__ ((__weak__));
#include <fork.h>
/* Register malloc's fork handlers; in the static case __dso_handle may
   be absent (weak), so test its address first.  */
#ifdef SHARED
# define thread_atfork(prepare, parent, child) \
__register_atfork (prepare, parent, child, __dso_handle)
#else
# define thread_atfork(prepare, parent, child) \
__register_atfork (prepare, parent, child, \
&__dso_handle == NULL ? NULL : __dso_handle)
#endif
/* thread specific data for glibc */
#include <bits/libc-tsd.h>
typedef int tsd_key_t[1]; /* no key data structure, libc magic does it */
__libc_tsd_define (static, void *, MALLOC) /* declaration/common definition */
#define tsd_key_create(key, destr) ((void) (key))
#define tsd_setspecific(key, data) __libc_tsd_set (void *, MALLOC, (data))
#define tsd_getspecific(key, vptr) ((vptr) = __libc_tsd_get (void *, MALLOC))
#include <sysdeps/generic/malloc-machine.h>
#endif /* !defined(_MALLOC_MACHINE_H) */

View File

@ -1,109 +0,0 @@
/* Special .init and .fini section support for HPPA. Linuxthreads version.
Copyright (C) 2001, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it
and/or modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
In addition to the permissions in the GNU Lesser General Public
License, the Free Software Foundation gives you unlimited
permission to link the compiled version of this file with other
programs, and to distribute those programs without any restriction
coming from the use of this file. (The Lesser General Public
License restrictions do apply in other respects; for example, they
cover modification of the file, and distribution when not linked
into another program.)
The GNU C Library is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* This file is compiled into assembly code which is then munged by a sed
script into two files: crti.s and crtn.s.
* crti.s puts a function prologue at the beginning of the
.init and .fini sections and defines global symbols for
those addresses, so they can be called as functions.
* crtn.s puts the corresponding function epilogues
in the .init and .fini sections. */
/* If we use the standard C version, the linkage table pointer won't
be properly preserved due to the splitting up of function prologues
and epilogues. Therefore we write these in assembly to make sure
they do the right thing. */
/* A single top-level asm so the section-switching directives reach the
   assembler verbatim; the build's sed scripts split the output at the
   @..._BEGINS/@..._ENDS markers below to produce crti.s and crtn.s.
   The asm text itself must not be altered: it is the emitted code.  */
__asm__ (
"#include \"defs.h\"\n"
"\n"
"/*@HEADER_ENDS*/\n"
"\n"
"/*@_init_PROLOG_BEGINS*/\n"
" .section .init\n"
" .align 4\n"
" .globl _init\n"
" .type _init,@function\n"
"_init:\n"
" stw %rp,-20(%sp)\n"
" stwm %r4,64(%sp)\n"
" stw %r19,-32(%sp)\n"
" bl __pthread_initialize_minimal,%rp\n"
" copy %r19,%r4 /* delay slot */\n"
" copy %r4,%r19\n"
"/*@_init_PROLOG_ENDS*/\n"
"\n"
"/*@_init_EPILOG_BEGINS*/\n"
"/* Here is the tail end of _init. */\n"
" .section .init\n"
" ldw -84(%sp),%rp\n"
" copy %r4,%r19\n"
" bv %r0(%rp)\n"
"_end_init:\n"
" ldwm -64(%sp),%r4\n"
"\n"
"/* Our very own unwind info, because the assembler can't handle\n"
" functions split into two or more pieces. */\n"
" .section .PARISC.unwind,\"a\",@progbits\n"
" .extern _init\n"
" .word _init, _end_init\n"
" .byte 0x08, 0x01, 0x00, 0x08, 0x00, 0x00, 0x00, 0x08\n"
"\n"
"/*@_init_EPILOG_ENDS*/\n"
"\n"
"/*@_fini_PROLOG_BEGINS*/\n"
" .section .fini\n"
" .align 4\n"
" .globl _fini\n"
" .type _fini,@function\n"
"_fini:\n"
" stw %rp,-20(%sp)\n"
" stwm %r4,64(%sp)\n"
" stw %r19,-32(%sp)\n"
" copy %r19,%r4\n"
"/*@_fini_PROLOG_ENDS*/\n"
"\n"
"/*@_fini_EPILOG_BEGINS*/\n"
" .section .fini\n"
" ldw -84(%sp),%rp\n"
" copy %r4,%r19\n"
" bv %r0(%rp)\n"
"_end_fini:\n"
" ldwm -64(%sp),%r4\n"
"\n"
" .section .PARISC.unwind,\"a\",@progbits\n"
" .extern _fini\n"
" .word _fini, _end_fini\n"
" .byte 0x08, 0x01, 0x00, 0x08, 0x00, 0x00, 0x00, 0x08\n"
"\n"
"/*@_fini_EPILOG_ENDS*/\n"
"\n"
"/*@TRAILER_BEGINS*/\n"
);

View File

@ -1,242 +0,0 @@
/* cancellable system calls for Linux/HPPA.
Copyright (C) 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Carlos O'Donell <carlos@baldric.uwo.ca>, 2003.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <sysdep.h>
#include <tls.h>
#ifndef __ASSEMBLER__
# include <linuxthreads/internals.h>
#endif
#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
/* Error-range threshold: below, syscall results are compared (unsigned)
   against NO_ERROR with cmpb,>>= -- results numerically above it,
   i.e. in [-0x1000, -1], are treated as negated errno values.  */
# ifndef NO_ERROR
# define NO_ERROR -0x1000
# endif
/* The syscall cancellation mechanism requires userspace
assistance, the following code does roughly this:
do arguments (read arg5 and arg6 to registers)
setup frame
check if there are threads, yes jump to pseudo_cancel
unthreaded:
syscall
check syscall return (jump to pre_end)
set errno
set return to -1
(jump to pre_end)
pseudo_cancel:
cenable
syscall
cdisable
check syscall return (jump to pre_end)
set errno
set return to -1
pre_end
restore stack
It is expected that 'ret' and 'END' macros will
append an 'undo arguments' and 'return' to the
this PSEUDO macro. */
/* PSEUDO emits the assembly body of a cancellable syscall stub: a fast
   path when single-threaded and a CENABLE/syscall/CDISABLE path
   otherwise.  NOTE(review): the whole body is one backslash-continued
   macro, so no commentary can be interleaved without breaking it.  */
# undef PSEUDO
# define PSEUDO(name, syscall_name, args) \
ENTRY (name) \
DOARGS_##args ASM_LINE_SEP \
copy TREG, %r1 ASM_LINE_SEP \
copy %sp, TREG ASM_LINE_SEP \
stwm %r1, 64(%sp) ASM_LINE_SEP \
stw %rp, -20(%sp) ASM_LINE_SEP \
stw TREG, -4(%sp) ASM_LINE_SEP \
/* Done setting up frame, continue... */ ASM_LINE_SEP \
SINGLE_THREAD_P ASM_LINE_SEP \
cmpib,<>,n 0,%ret0,L(pseudo_cancel) ASM_LINE_SEP \
L(unthreaded): ASM_LINE_SEP \
/* Save r19 */ ASM_LINE_SEP \
SAVE_PIC(TREG) ASM_LINE_SEP \
/* Do syscall, delay loads # */ ASM_LINE_SEP \
ble 0x100(%sr2,%r0) ASM_LINE_SEP \
ldi SYS_ify (syscall_name), %r20 /* delay */ ASM_LINE_SEP \
ldi NO_ERROR,%r1 ASM_LINE_SEP \
cmpb,>>=,n %r1,%ret0,L(pre_end) ASM_LINE_SEP \
/* Restore r19 from TREG */ ASM_LINE_SEP \
LOAD_PIC(TREG) /* delay */ ASM_LINE_SEP \
SYSCALL_ERROR_HANDLER ASM_LINE_SEP \
/* Use TREG for temp storage */ ASM_LINE_SEP \
copy %ret0, TREG /* delay */ ASM_LINE_SEP \
/* OPTIMIZE: Don't reload r19 */ ASM_LINE_SEP \
/* do a -1*syscall_ret0 */ ASM_LINE_SEP \
sub %r0, TREG, TREG ASM_LINE_SEP \
/* Store into errno location */ ASM_LINE_SEP \
stw TREG, 0(%sr0,%ret0) ASM_LINE_SEP \
b L(pre_end) ASM_LINE_SEP \
/* return -1 as error */ ASM_LINE_SEP \
ldo -1(%r0), %ret0 /* delay */ ASM_LINE_SEP \
L(pseudo_cancel): ASM_LINE_SEP \
PUSHARGS_##args /* Save args */ ASM_LINE_SEP \
/* Save r19 into TREG */ ASM_LINE_SEP \
CENABLE /* FUNC CALL */ ASM_LINE_SEP \
SAVE_PIC(TREG) /* delay */ ASM_LINE_SEP \
/* restore syscall args */ ASM_LINE_SEP \
POPARGS_##args ASM_LINE_SEP \
/* save mask from cenable (use stub rp slot) */ ASM_LINE_SEP \
stw %ret0, -24(%sp) ASM_LINE_SEP \
/* ... SYSCALL ... */ ASM_LINE_SEP \
ble 0x100(%sr2,%r0) ASM_LINE_SEP \
ldi SYS_ify (syscall_name), %r20 /* delay */ ASM_LINE_SEP \
/* ............... */ ASM_LINE_SEP \
LOAD_PIC(TREG) ASM_LINE_SEP \
/* pass mask as arg0 to cdisable */ ASM_LINE_SEP \
ldw -24(%sp), %r26 ASM_LINE_SEP \
CDISABLE ASM_LINE_SEP \
stw %ret0, -24(%sp) /* delay */ ASM_LINE_SEP \
/* Restore syscall return */ ASM_LINE_SEP \
ldw -24(%sp), %ret0 ASM_LINE_SEP \
/* compare error */ ASM_LINE_SEP \
ldi NO_ERROR,%r1 ASM_LINE_SEP \
/* branch if no error */ ASM_LINE_SEP \
cmpb,>>=,n %r1,%ret0,L(pre_end) ASM_LINE_SEP \
LOAD_PIC(TREG) /* cond. nullify */ ASM_LINE_SEP \
copy %ret0, TREG /* save syscall return */ ASM_LINE_SEP \
SYSCALL_ERROR_HANDLER ASM_LINE_SEP \
/* make syscall res value positive */ ASM_LINE_SEP \
sub %r0, TREG, TREG /* delay */ ASM_LINE_SEP \
/* No need to LOAD_PIC */ ASM_LINE_SEP \
/* store into errno location */ ASM_LINE_SEP \
stw TREG, 0(%sr0,%ret0) ASM_LINE_SEP \
/* return -1 */ ASM_LINE_SEP \
ldo -1(%r0), %ret0 ASM_LINE_SEP \
L(pre_end): ASM_LINE_SEP \
/* Restore rp before exit */ ASM_LINE_SEP \
ldw -84(%sr0,%sp), %rp ASM_LINE_SEP \
/* Undo frame */ ASM_LINE_SEP \
ldwm -64(%sp),TREG ASM_LINE_SEP \
/* No need to LOAD_PIC */ ASM_LINE_SEP
/* Save arguments into our frame */
/* PUSHARGS_n spills syscall argument registers %r26..%r21 to fixed
   stack slots so the CENABLE call in PSEUDO cannot clobber them;
   POPARGS_n reloads them just before the syscall.  */
# define PUSHARGS_0 /* nothing to do */
# define PUSHARGS_1 PUSHARGS_0 stw %r26, -36(%sr0,%sp) ASM_LINE_SEP
# define PUSHARGS_2 PUSHARGS_1 stw %r25, -40(%sr0,%sp) ASM_LINE_SEP
# define PUSHARGS_3 PUSHARGS_2 stw %r24, -44(%sr0,%sp) ASM_LINE_SEP
# define PUSHARGS_4 PUSHARGS_3 stw %r23, -48(%sr0,%sp) ASM_LINE_SEP
# define PUSHARGS_5 PUSHARGS_4 stw %r22, -52(%sr0,%sp) ASM_LINE_SEP
# define PUSHARGS_6 PUSHARGS_5 stw %r21, -56(%sr0,%sp) ASM_LINE_SEP
/* Bring them back from the stack */
# define POPARGS_0 /* nothing to do */
# define POPARGS_1 POPARGS_0 ldw -36(%sr0,%sp), %r26 ASM_LINE_SEP
# define POPARGS_2 POPARGS_1 ldw -40(%sr0,%sp), %r25 ASM_LINE_SEP
# define POPARGS_3 POPARGS_2 ldw -44(%sr0,%sp), %r24 ASM_LINE_SEP
# define POPARGS_4 POPARGS_3 ldw -48(%sr0,%sp), %r23 ASM_LINE_SEP
# define POPARGS_5 POPARGS_4 ldw -52(%sr0,%sp), %r22 ASM_LINE_SEP
# define POPARGS_6 POPARGS_5 ldw -56(%sr0,%sp), %r21 ASM_LINE_SEP
/* CENABLE/CDISABLE call the per-library async-cancel helpers around the
   syscall.  The library is chosen at compile time: libpthread, libc, or
   librt.  NOTE(review): within each library the PIC and non-PIC
   definitions are textually identical, so the #ifdef PIC split below is
   redundant as written.  */
# ifdef IS_IN_libpthread
# ifdef PIC
# define CENABLE .import __pthread_enable_asynccancel,code ASM_LINE_SEP \
bl __pthread_enable_asynccancel,%r2 ASM_LINE_SEP
# define CDISABLE .import __pthread_disable_asynccancel,code ASM_LINE_SEP \
bl __pthread_disable_asynccancel,%r2 ASM_LINE_SEP
# else
# define CENABLE .import __pthread_enable_asynccancel,code ASM_LINE_SEP \
bl __pthread_enable_asynccancel,%r2 ASM_LINE_SEP
# define CDISABLE .import __pthread_disable_asynccancel,code ASM_LINE_SEP \
bl __pthread_disable_asynccancel,%r2 ASM_LINE_SEP
# endif
# elif !defined NOT_IN_libc
# ifdef PIC
# define CENABLE .import __libc_enable_asynccancel,code ASM_LINE_SEP \
bl __libc_enable_asynccancel,%r2 ASM_LINE_SEP
# define CDISABLE .import __libc_disable_asynccancel,code ASM_LINE_SEP \
bl __libc_disable_asynccancel,%r2 ASM_LINE_SEP
# else
# define CENABLE .import __libc_enable_asynccancel,code ASM_LINE_SEP \
bl __libc_enable_asynccancel,%r2 ASM_LINE_SEP
# define CDISABLE .import __libc_disable_asynccancel,code ASM_LINE_SEP \
bl __libc_disable_asynccancel,%r2 ASM_LINE_SEP
# endif
# else
# ifdef PIC
# define CENABLE .import __librt_enable_asynccancel,code ASM_LINE_SEP \
bl __librt_enable_asynccancel,%r2 ASM_LINE_SEP
# define CDISABLE .import __librt_disable_asynccancel,code ASM_LINE_SEP \
bl __librt_disable_asynccancel,%r2 ASM_LINE_SEP
# else
# define CENABLE .import __librt_enable_asynccancel,code ASM_LINE_SEP \
bl __librt_enable_asynccancel,%r2 ASM_LINE_SEP
# define CDISABLE .import __librt_disable_asynccancel,code ASM_LINE_SEP \
bl __librt_disable_asynccancel,%r2 ASM_LINE_SEP
# endif
# endif
/* p_header.multiple_threads is +12 from the pthread_descr struct start,
We could have called __get_cr27() but we really want less overhead */
# define MULTIPLE_THREADS_OFFSET 0xC
/* cr27 has been initialized to 0x0 by kernel */
# define NO_THREAD_CR27 0x0
/* Pick the per-library multiple_threads flag variable.  */
# ifdef IS_IN_libpthread
# define __local_multiple_threads __pthread_multiple_threads
# elif !defined NOT_IN_libc
# define __local_multiple_threads __libc_multiple_threads
# else
# define __local_multiple_threads __librt_multiple_threads
# endif
# ifndef __ASSEMBLER__
# if !defined NOT_IN_libc || defined IS_IN_libpthread
extern int __local_multiple_threads attribute_hidden;
# else
extern int __local_multiple_threads;
# endif
/* C-level form: true (the expected case) when no second thread has
   been created, letting callers skip the cancellation dance.  */
# define SINGLE_THREAD_P __builtin_expect (__local_multiple_threads == 0, 1)
# else
/* Assembler forms leave the flag in %ret0; PSEUDO above tests it with
   cmpib against zero.  */
/* This ALT version requires newer kernel support */
# define SINGLE_THREAD_P_MFCTL \
mfctl %cr27, %ret0 ASM_LINE_SEP \
cmpib,= NO_THREAD_CR27,%ret0,L(stp) ASM_LINE_SEP \
nop ASM_LINE_SEP \
ldw MULTIPLE_THREADS_OFFSET(%sr0,%ret0),%ret0 ASM_LINE_SEP \
L(stp): ASM_LINE_SEP
# ifdef PIC
/* Slower version uses GOT to get value of __local_multiple_threads */
# define SINGLE_THREAD_P \
addil LT%__local_multiple_threads, %r19 ASM_LINE_SEP \
ldw RT%__local_multiple_threads(%sr0,%r1), %ret0 ASM_LINE_SEP \
ldw 0(%sr0,%ret0), %ret0 ASM_LINE_SEP
# else
/* Slow non-pic version using DP */
# define SINGLE_THREAD_P \
addil LR%__local_multiple_threads-$global$,%r27 ASM_LINE_SEP \
ldw RR%__local_multiple_threads-$global$(%sr0,%r1),%ret0 ASM_LINE_SEP
# endif
# endif
#elif !defined __ASSEMBLER__
/* This code should never be used but we define it anyhow. */
# define SINGLE_THREAD_P (1)
#endif
/* !defined NOT_IN_libc || defined IS_IN_libpthread */