sparc: Use atomic compiler builtins on sparc
This patch removes the arch-specific atomic operations, relying on compiler builtins instead. The __sparc32_atomic_locks support is removed, and a configure check is added to verify whether the compiler uses libatomic to implement CAS. It also removes the sparc-specific sem_* and pthread_barrier_* implementations.

This in turn allows building against a LEON3/LEON4 sparcv8 target, although it will still be incompatible with generic sparcv9.

Checked on sparcv9-linux-gnu and sparc64-linux-gnu. I also checked a build against sparcv8-linux-gnu with -mcpu=leon3.

Tested-by: Andreas Larsson <andreas@gaisler.com>
parent 5d9b7b9fa7
commit 3b5ebe85aa
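The heart of the change is that GCC now expands the atomic operations, instead of the hand-written cas/casx assembly and the ldstub-based emulation in the headers removed below. A minimal sketch (editor's illustration, not part of the patch) of the builtin that the new sysdeps/sparc/atomic-machine.h wraps:

/* Editor's sketch, not part of the patch.  __atomic_compare_exchange_n
   atomically compares *mem with *&oldval; on match it stores newval,
   otherwise it writes the current value of *mem back into oldval.  */
#include <stdint.h>

static inline uint32_t
cas_acq (uint32_t *mem, uint32_t newval, uint32_t oldval)
{
  /* Acquire MO on success, relaxed on failure, matching the new
     __arch_compare_and_exchange_val_int macro below.  */
  __atomic_compare_exchange_n (mem, &oldval, newval, 0,
                               __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  return oldval;   /* Previous value of *mem; it equals the expected old
                      value exactly when the exchange happened.  */
}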
@ -16,5 +16,14 @@ CPPFLAGS-crti.S += -fPIC
CPPFLAGS-crtn.S += -fPIC
endif

# nscd uses atomic_spin_nop which in turn requires cpu_relax
ifeq ($(subdir),nscd)
routines += cpu_relax
endif

ifeq ($(subdir), nptl)
libpthread-routines += cpu_relax
endif

# The assembler on SPARC needs the -fPIC flag even when it's assembler code.
ASFLAGS-.os += -fPIC
sysdeps/sparc/atomic-machine.h (new file, 95 lines)
@ -0,0 +1,95 @@
/* Atomic operations. Sparc version.
Copyright (C) 2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */

#ifndef _ATOMIC_MACHINE_H
#define _ATOMIC_MACHINE_H 1

#include <stdint.h>

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#ifdef __arch64__
# define __HAVE_64B_ATOMICS 1
#else
# define __HAVE_64B_ATOMICS 0
#endif
#define USE_ATOMIC_COMPILER_BUILTINS 1

/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS __HAVE_64B_ATOMICS

/* Compare and exchange.
For all "bool" routines, we return FALSE if exchange succesful. */

#define __arch_compare_and_exchange_val_int(mem, newval, oldval, model) \
({ \
typeof (*mem) __oldval = (oldval); \
__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
model, __ATOMIC_RELAXED); \
__oldval; \
})

#define atomic_compare_and_exchange_val_acq(mem, new, old) \
({ \
__typeof ((__typeof (*(mem))) *(mem)) __result; \
if (sizeof (*mem) == 4 \
|| (__HAVE_64B_ATOMICS && sizeof (*mem) == 8)) \
__result = __arch_compare_and_exchange_val_int (mem, new, old, \
__ATOMIC_ACQUIRE); \
else \
abort (); \
__result; \
})

#ifdef __sparc_v9__
# define atomic_full_barrier() \
__asm __volatile ("membar #LoadLoad | #LoadStore" \
" | #StoreLoad | #StoreStore" : : : "memory")
# define atomic_read_barrier() \
__asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory")
# define atomic_write_barrier() \
__asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory")

extern void __cpu_relax (void);
# define atomic_spin_nop() __cpu_relax ()
#endif

#endif /* _ATOMIC_MACHINE_H */
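Callers of the val-returning macro above detect success by comparing the returned value with the old value they passed in. A sketch (editor's illustration, not part of the patch; 'counter' is a hypothetical variable) of the usual retry loop built on atomic_compare_and_exchange_val_acq:

/* Editor's sketch: typical retry loop on top of the val-returning CAS.  */
static int counter;

static void
add_one (void)
{
  int old, seen;
  do
    {
      old = counter;                 /* Value we expect to find.  */
      seen = atomic_compare_and_exchange_val_acq (&counter, old + 1, old);
    }
  while (seen != old);               /* Someone raced us; try again.  */
}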
@ -1,4 +1,4 @@
/* CPU strand yielding for busy loops. Linux/sparc64 version.
/* CPU strand yielding for busy loops. Linux/sparc version.
Copyright (C) 2017-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.

@ -18,6 +18,7 @@

#include <sparc-ifunc.h>

#ifdef __sparc_v9__
static void
__cpu_relax_generic (void)
{
@ -36,3 +37,4 @@ sparc_libc_ifunc (__cpu_relax,
hwcap & HWCAP_SPARC_PAUSE
? __cpu_relax_pause
: __cpu_relax_generic)
#endif
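For context, atomic_spin_nop expands to __cpu_relax on sparc, which the ifunc above resolves to the "pause"-based variant when HWCAP_SPARC_PAUSE is set and to a generic routine otherwise. A sketch (editor's illustration, not part of the patch; 'lock_word' is hypothetical) of the intended use as a back-off in a spin-wait loop:

/* Editor's sketch: atomic_spin_nop as a polite back-off while spinning.  */
static int lock_word;               /* Hypothetical lock word, 0 = free.  */

static void
spin_acquire (void)
{
  while (atomic_compare_and_exchange_val_acq (&lock_word, 1, 0) != 0)
    atomic_spin_nop ();             /* Yield the CPU strand while waiting.  */
}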
@ -1,363 +0,0 @@
/* Atomic operations. sparc32 version.
Copyright (C) 2003-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */

#ifndef _ATOMIC_MACHINE_H
#define _ATOMIC_MACHINE_H 1

#include <stdint.h>

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1


/* We have no compare and swap, just test and set.
The following implementation contends on 64 global locks
per library and assumes no variable will be accessed using atomic.h
macros from two different libraries. */

__make_section_unallocated
(".gnu.linkonce.b.__sparc32_atomic_locks, \"aw\", %nobits");

volatile unsigned char __sparc32_atomic_locks[64]
__attribute__ ((nocommon, section (".gnu.linkonce.b.__sparc32_atomic_locks"
__sec_comment),
visibility ("hidden")));

#define __sparc32_atomic_do_lock(addr) \
do \
{ \
unsigned int __old_lock; \
unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \
& 63; \
do \
__asm __volatile ("ldstub %1, %0" \
: "=r" (__old_lock), \
"=m" (__sparc32_atomic_locks[__idx]) \
: "m" (__sparc32_atomic_locks[__idx]) \
: "memory"); \
while (__old_lock); \
} \
while (0)

#define __sparc32_atomic_do_unlock(addr) \
do \
{ \
__sparc32_atomic_locks[(((long) addr >> 2) \
^ ((long) addr >> 12)) & 63] = 0; \
__asm __volatile ("" ::: "memory"); \
} \
while (0)

#define __sparc32_atomic_do_lock24(addr) \
do \
{ \
unsigned int __old_lock; \
do \
__asm __volatile ("ldstub %1, %0" \
: "=r" (__old_lock), "=m" (*(addr)) \
: "m" (*(addr)) \
: "memory"); \
while (__old_lock); \
} \
while (0)

#define __sparc32_atomic_do_unlock24(addr) \
do \
{ \
__asm __volatile ("" ::: "memory"); \
*(char *) (addr) = 0; \
} \
while (0)


#ifndef SHARED
# define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) }; \
union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) }; \
register uint32_t __acev_tmp __asm ("%g6"); \
register __typeof (mem) __acev_mem __asm ("%g1") = (mem); \
register uint32_t __acev_oldval __asm ("%g5"); \
__acev_tmp = newval_arg.v; \
__acev_oldval = oldval_arg.v; \
/* .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, \
because as will then mark the object file as V8+ arch. */ \
__asm __volatile (".word 0xcde05005" \
: "+r" (__acev_tmp), "=m" (*__acev_mem) \
: "r" (__acev_oldval), "m" (*__acev_mem), \
"r" (__acev_mem) : "memory"); \
(__typeof (oldval)) __acev_tmp; })
#endif

/* The only basic operation needed is compare and exchange. */
#define __v7_compare_and_exchange_val_acq(mem, newval, oldval) \
({ __typeof (mem) __acev_memp = (mem); \
__typeof (*mem) __acev_ret; \
__typeof (*mem) __acev_newval = (newval); \
\
__sparc32_atomic_do_lock (__acev_memp); \
__acev_ret = *__acev_memp; \
if (__acev_ret == (oldval)) \
*__acev_memp = __acev_newval; \
__sparc32_atomic_do_unlock (__acev_memp); \
__acev_ret; })

#define __v7_compare_and_exchange_bool_acq(mem, newval, oldval) \
({ __typeof (mem) __aceb_memp = (mem); \
int __aceb_ret; \
__typeof (*mem) __aceb_newval = (newval); \
\
__sparc32_atomic_do_lock (__aceb_memp); \
__aceb_ret = 0; \
if (*__aceb_memp == (oldval)) \
*__aceb_memp = __aceb_newval; \
else \
__aceb_ret = 1; \
__sparc32_atomic_do_unlock (__aceb_memp); \
__aceb_ret; })

#define __v7_exchange_acq(mem, newval) \
({ __typeof (mem) __acev_memp = (mem); \
__typeof (*mem) __acev_ret; \
__typeof (*mem) __acev_newval = (newval); \
\
__sparc32_atomic_do_lock (__acev_memp); \
__acev_ret = *__acev_memp; \
*__acev_memp = __acev_newval; \
__sparc32_atomic_do_unlock (__acev_memp); \
__acev_ret; })

#define __v7_exchange_and_add(mem, value) \
({ __typeof (mem) __acev_memp = (mem); \
__typeof (*mem) __acev_ret; \
\
__sparc32_atomic_do_lock (__acev_memp); \
__acev_ret = *__acev_memp; \
*__acev_memp = __acev_ret + (value); \
__sparc32_atomic_do_unlock (__acev_memp); \
__acev_ret; })

/* Special versions, which guarantee that top 8 bits of all values
are cleared and use those bits as the ldstub lock. */
#define __v7_compare_and_exchange_val_24_acq(mem, newval, oldval) \
({ __typeof (mem) __acev_memp = (mem); \
__typeof (*mem) __acev_ret; \
__typeof (*mem) __acev_newval = (newval); \
\
__sparc32_atomic_do_lock24 (__acev_memp); \
__acev_ret = *__acev_memp & 0xffffff; \
if (__acev_ret == (oldval)) \
*__acev_memp = __acev_newval; \
else \
__sparc32_atomic_do_unlock24 (__acev_memp); \
__asm __volatile ("" ::: "memory"); \
__acev_ret; })

#define __v7_exchange_24_rel(mem, newval) \
({ __typeof (mem) __acev_memp = (mem); \
__typeof (*mem) __acev_ret; \
__typeof (*mem) __acev_newval = (newval); \
\
__sparc32_atomic_do_lock24 (__acev_memp); \
__acev_ret = *__acev_memp & 0xffffff; \
*__acev_memp = __acev_newval; \
__asm __volatile ("" ::: "memory"); \
__acev_ret; })

#ifdef SHARED

/* When dynamically linked, we assume pre-v9 libraries are only ever
used on pre-v9 CPU. */
# define __atomic_is_v9 0

# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
__v7_compare_and_exchange_val_acq (mem, newval, oldval)

# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
__v7_compare_and_exchange_bool_acq (mem, newval, oldval)

# define atomic_exchange_acq(mem, newval) \
__v7_exchange_acq (mem, newval)

# define atomic_exchange_and_add(mem, value) \
__v7_exchange_and_add (mem, value)

# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \
({ \
if (sizeof (*mem) != 4) \
abort (); \
__v7_compare_and_exchange_val_24_acq (mem, newval, oldval); })

# define atomic_exchange_24_rel(mem, newval) \
({ \
if (sizeof (*mem) != 4) \
abort (); \
__v7_exchange_24_rel (mem, newval); })

# define atomic_full_barrier() __asm ("" ::: "memory")
# define atomic_read_barrier() atomic_full_barrier ()
# define atomic_write_barrier() atomic_full_barrier ()

#else

/* In libc.a/libpthread.a etc. we don't know if we'll be run on
pre-v9 or v9 CPU. To be interoperable with dynamically linked
apps on v9 CPUs e.g. with process shared primitives, use cas insn
on v9 CPUs and ldstub on pre-v9. */

extern uint64_t _dl_hwcap __attribute__((weak));
# define __atomic_is_v9 \
(__builtin_expect (&_dl_hwcap != 0, 1) \
&& __builtin_expect (_dl_hwcap & HWCAP_SPARC_V9, HWCAP_SPARC_V9))

# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
({ \
__typeof (*mem) __acev_wret; \
if (sizeof (*mem) != 4) \
abort (); \
if (__atomic_is_v9) \
__acev_wret \
= __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\
else \
__acev_wret \
= __v7_compare_and_exchange_val_acq (mem, newval, oldval); \
__acev_wret; })

# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
({ \
int __acev_wret; \
if (sizeof (*mem) != 4) \
abort (); \
if (__atomic_is_v9) \
{ \
__typeof (oldval) __acev_woldval = (oldval); \
__acev_wret \
= __v9_compare_and_exchange_val_32_acq (mem, newval, \
__acev_woldval) \
!= __acev_woldval; \
} \
else \
__acev_wret \
= __v7_compare_and_exchange_bool_acq (mem, newval, oldval); \
__acev_wret; })

# define atomic_exchange_rel(mem, newval) \
({ \
__typeof (*mem) __acev_wret; \
if (sizeof (*mem) != 4) \
abort (); \
if (__atomic_is_v9) \
{ \
__typeof (mem) __acev_wmemp = (mem); \
__typeof (*(mem)) __acev_wval = (newval); \
do \
__acev_wret = *__acev_wmemp; \
while (__builtin_expect \
(__v9_compare_and_exchange_val_32_acq (__acev_wmemp,\
__acev_wval, \
__acev_wret) \
!= __acev_wret, 0)); \
} \
else \
__acev_wret = __v7_exchange_acq (mem, newval); \
__acev_wret; })

# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \
({ \
__typeof (*mem) __acev_wret; \
if (sizeof (*mem) != 4) \
abort (); \
if (__atomic_is_v9) \
__acev_wret \
= __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\
else \
__acev_wret \
= __v7_compare_and_exchange_val_24_acq (mem, newval, oldval);\
__acev_wret; })

# define atomic_exchange_24_rel(mem, newval) \
({ \
__typeof (*mem) __acev_w24ret; \
if (sizeof (*mem) != 4) \
abort (); \
if (__atomic_is_v9) \
__acev_w24ret = atomic_exchange_rel (mem, newval); \
else \
__acev_w24ret = __v7_exchange_24_rel (mem, newval); \
__acev_w24ret; })

#define atomic_full_barrier() \
do { \
if (__atomic_is_v9) \
/* membar #LoadLoad | #LoadStore | #StoreLoad | #StoreStore */ \
__asm __volatile (".word 0x8143e00f" : : : "memory"); \
else \
__asm __volatile ("" : : : "memory"); \
} while (0)

#define atomic_read_barrier() \
do { \
if (__atomic_is_v9) \
/* membar #LoadLoad | #LoadStore */ \
__asm __volatile (".word 0x8143e005" : : : "memory"); \
else \
__asm __volatile ("" : : : "memory"); \
} while (0)

#define atomic_write_barrier() \
do { \
if (__atomic_is_v9) \
/* membar #LoadStore | #StoreStore */ \
__asm __volatile (".word 0x8143e00c" : : : "memory"); \
else \
__asm __volatile ("" : : : "memory"); \
} while (0)

#endif

#include <sysdep.h>

#endif /* atomic-machine.h */
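The pre-v9 emulation removed above has no CAS instruction to work with, only the ldstub test-and-set, so it fakes compare-and-swap by hashing the address onto one of 64 global byte locks and doing the compare and store non-atomically under that lock. A compact sketch of the idea (editor's illustration, not part of the patch; __atomic_test_and_set stands in for the ldstub asm):

/* Editor's sketch of the removed scheme, not glibc code.  */
static volatile unsigned char emu_locks[64];

static unsigned int
emu_cas (unsigned int *mem, unsigned int newval, unsigned int oldval)
{
  unsigned int idx = (((unsigned long) mem >> 2)
                      ^ ((unsigned long) mem >> 12)) & 63;
  while (__atomic_test_and_set (&emu_locks[idx], __ATOMIC_ACQUIRE))
    ;                                   /* Spin, as the ldstub loop does.  */
  unsigned int ret = *mem;
  if (ret == oldval)
    *mem = newval;
  __atomic_clear (&emu_locks[idx], __ATOMIC_RELEASE);
  return ret;
}

As the removed comment notes, this only works if every user of the variable goes through the same lock array, which is why the scheme cannot interoperate with code using real atomics.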
sysdeps/sparc/sparc32/configure (vendored, 35 lines)
@ -160,3 +160,38 @@ $as_echo "$libc_cv_sparcv8" >&6; }
if test $libc_cv_sparcv8 = no; then
as_fn_error $? "no support for pre-v8 sparc" "$LINENO" 5
fi

# Test if compiler generates external calls to libatomic for CAS operation.
# It is suffice to check for int only and the test is similar of C11
# atomic_compare_exchange_strong using GCC builtins.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for external libatomic calls" >&5
$as_echo_n "checking for external libatomic calls... " >&6; }
if ${libc_cv_cas_uses_libatomic+:} false; then :
$as_echo_n "(cached) " >&6
else
cat > conftest.c <<EOF
_Bool foo (int *ptr, int *expected, int desired)
{
return __atomic_compare_exchange_n (ptr, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
EOF
libc_cv_cas_uses_libatomic=no
if { ac_try='${CC-cc} -S conftest.c -o conftest.s 1>&5'
{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
(eval $ac_try) 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; }; then
if grep '__atomic_compare_exchange_4' conftest.s >/dev/null; then
libc_cv_cas_uses_libatomic=yes
fi
fi
rm -f conftest.c conftest.s

fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_cas_uses_libatomic" >&5
$as_echo "$libc_cv_cas_uses_libatomic" >&6; }
if test $libc_cv_cas_uses_libatomic = yes; then
as_fn_error $? "external dependency of libatomic is not supported" "$LINENO" 5
fi
@ -11,3 +11,27 @@ AC_CACHE_CHECK([for at least sparcv8 support],
if test $libc_cv_sparcv8 = no; then
AC_MSG_ERROR([no support for pre-v8 sparc])
fi

# Test if compiler generates external calls to libatomic for CAS operation.
# It is suffice to check for int only and the test is similar of C11
# atomic_compare_exchange_strong using GCC builtins.
AC_CACHE_CHECK(for external libatomic calls,
libc_cv_cas_uses_libatomic,
[cat > conftest.c <<EOF
_Bool foo (int *ptr, int *expected, int desired)
{
return __atomic_compare_exchange_n (ptr, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
EOF
libc_cv_cas_uses_libatomic=no
if AC_TRY_COMMAND(${CC-cc} -S conftest.c -o conftest.s 1>&AS_MESSAGE_LOG_FD); then
if grep '__atomic_compare_exchange_4' conftest.s >/dev/null; then
libc_cv_cas_uses_libatomic=yes
fi
fi
rm -f conftest.c conftest.s
])
if test $libc_cv_cas_uses_libatomic = yes; then
AC_MSG_ERROR([external dependency of libatomic is not supported])
fi
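As the comment in the check says, the conftest is the GCC-builtin form of a C11 atomic_compare_exchange_strong. An equivalent C11 version, for illustration only (editor's sketch, not part of the patch): if the compiler cannot inline the CAS for the selected target, it emits a call to __atomic_compare_exchange_4, which would pull in libatomic, and that is exactly what the new check rejects.

/* Editor's sketch of the same probe in C11 form.  */
#include <stdatomic.h>
#include <stdbool.h>

bool
foo (atomic_int *ptr, int *expected, int desired)
{
  return atomic_compare_exchange_strong (ptr, expected, desired);
}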
@ -1,53 +0,0 @@
/* low level locking for pthread library. SPARC version.
Copyright (C) 2003-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */

#include <errno.h>
#include <sysdep.h>
#include <lowlevellock.h>
#include <sys/time.h>
#include <time.h>


void
__lll_lock_wait_private (int *futex)
{
do
{
int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
if (oldval != 0)
lll_futex_wait (futex, 2, LLL_PRIVATE);
}
while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
}


/* These functions don't get included in libc.so */
#if IS_IN (libpthread)
void
__lll_lock_wait (int *futex, int private)
{
do
{
int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
if (oldval != 0)
lll_futex_wait (futex, 2, private);
}
while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
}
#endif
@ -1 +0,0 @@
#error No support for pthread barriers on pre-v9 sparc.
@ -1,82 +0,0 @@
/* sem_post -- post to a POSIX semaphore. Generic futex-using version.
Copyright (C) 2003-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */

#include <atomic.h>
#include <errno.h>
#include <sysdep.h>
#include <lowlevellock.h>
#include <internaltypes.h>
#include <semaphore.h>
#include <futex-internal.h>

#include <shlib-compat.h>


/* See sem_wait for an explanation of the algorithm. */
int
__new_sem_post (sem_t *sem)
{
struct new_sem *isem = (struct new_sem *) sem;
int private = isem->private;
unsigned int v;

__sparc32_atomic_do_lock24 (&isem->pad);

v = isem->value;
if ((v >> SEM_VALUE_SHIFT) == SEM_VALUE_MAX)
{
__sparc32_atomic_do_unlock24 (&isem->pad);

__set_errno (EOVERFLOW);
return -1;
}
isem->value = v + (1 << SEM_VALUE_SHIFT);

__sparc32_atomic_do_unlock24 (&isem->pad);

if ((v & SEM_NWAITERS_MASK) != 0)
futex_wake (&isem->value, 1, private);

return 0;
}
versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1);


#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
int
attribute_compat_text_section
__old_sem_post (sem_t *sem)
{
int *futex = (int *) sem;

/* We must need to synchronize with consumers of this token, so the atomic
increment must have release MO semantics. */
atomic_write_barrier ();
(void) atomic_increment_val (futex);
/* We always have to assume it is a shared semaphore. */
int err = lll_futex_wake (futex, 1, LLL_SHARED);
if (__builtin_expect (err, 0) < 0)
{
__set_errno (-err);
return -1;
}
return 0;
}
compat_symbol (libpthread, __old_sem_post, sem_post, GLIBC_2_0);
#endif
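The sparc-specific sem_post above serializes everything through the 24-bit pad lock. Once it is removed, the semaphore value can be updated with a single atomic read-modify-write. A sketch of that shape (editor's illustration only; the generic glibc implementation that takes over differs in details):

/* Editor's sketch, not glibc code: sem_post with builtin atomics.  The
   token count lives above SEM_VALUE_SHIFT and the waiter flag in
   SEM_NWAITERS_MASK, as in the removed file.  */
static int
sem_post_sketch (struct new_sem *isem)
{
  unsigned int v = __atomic_load_n (&isem->value, __ATOMIC_RELAXED);
  do
    {
      if ((v >> SEM_VALUE_SHIFT) == SEM_VALUE_MAX)
        {
          __set_errno (EOVERFLOW);
          return -1;
        }
    }
  /* Release MO so the new token is visible to the woken waiter.  */
  while (!__atomic_compare_exchange_n (&isem->value, &v,
                                       v + (1 << SEM_VALUE_SHIFT), 0,
                                       __ATOMIC_RELEASE, __ATOMIC_RELAXED));
  if ((v & SEM_NWAITERS_MASK) != 0)
    futex_wake (&isem->value, 1, isem->private);
  return 0;
}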
@ -1,146 +0,0 @@
/* sem_waitcommon -- wait on a semaphore, shared code.
Copyright (C) 2003-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */

#include <errno.h>
#include <sysdep.h>
#include <futex-internal.h>
#include <internaltypes.h>
#include <semaphore.h>
#include <sys/time.h>

#include <pthreadP.h>
#include <shlib-compat.h>
#include <atomic.h>


static void
__sem_wait_32_finish (struct new_sem *sem);

static void
__sem_wait_cleanup (void *arg)
{
struct new_sem *sem = (struct new_sem *) arg;

__sem_wait_32_finish (sem);
}

/* Wait until at least one token is available, possibly with a timeout.
This is in a separate function in order to make sure gcc
puts the call site into an exception region, and thus the
cleanups get properly run. TODO still necessary? Other futex_wait
users don't seem to need it. */
static int
__attribute__ ((noinline))
do_futex_wait (struct new_sem *sem, const struct timespec *abstime)
{
int err;

err = futex_abstimed_wait_cancelable (&sem->value, SEM_NWAITERS_MASK,
abstime, sem->private);

return err;
}

/* Fast path: Try to grab a token without blocking. */
static int
__new_sem_wait_fast (struct new_sem *sem, int definitive_result)
{
unsigned int v;
int ret = 0;

__sparc32_atomic_do_lock24(&sem->pad);

v = sem->value;
if ((v >> SEM_VALUE_SHIFT) == 0)
ret = -1;
else
sem->value = v - (1 << SEM_VALUE_SHIFT);

__sparc32_atomic_do_unlock24(&sem->pad);

return ret;
}

/* Slow path that blocks. */
static int
__attribute__ ((noinline))
__new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
{
unsigned int v;
int err = 0;

__sparc32_atomic_do_lock24(&sem->pad);

sem->nwaiters++;

pthread_cleanup_push (__sem_wait_cleanup, sem);

/* Wait for a token to be available. Retry until we can grab one. */
v = sem->value;
do
{
if (!(v & SEM_NWAITERS_MASK))
sem->value = v | SEM_NWAITERS_MASK;

/* If there is no token, wait. */
if ((v >> SEM_VALUE_SHIFT) == 0)
{
__sparc32_atomic_do_unlock24(&sem->pad);

err = do_futex_wait(sem, abstime);
if (err == ETIMEDOUT || err == EINTR)
{
__set_errno (err);
err = -1;
goto error;
}
err = 0;

__sparc32_atomic_do_lock24(&sem->pad);

/* We blocked, so there might be a token now. */
v = sem->value;
}
}
/* If there is no token, we must not try to grab one. */
while ((v >> SEM_VALUE_SHIFT) == 0);

sem->value = v - (1 << SEM_VALUE_SHIFT);

__sparc32_atomic_do_unlock24(&sem->pad);

error:
pthread_cleanup_pop (0);

__sem_wait_32_finish (sem);

return err;
}

/* Stop being a registered waiter (non-64b-atomics code only). */
static void
__sem_wait_32_finish (struct new_sem *sem)
{
__sparc32_atomic_do_lock24(&sem->pad);

if (--sem->nwaiters == 0)
sem->value &= ~SEM_NWAITERS_MASK;

__sparc32_atomic_do_unlock24(&sem->pad);
}
@ -4,12 +4,3 @@ ASFLAGS-.o += -Wa,-Av9d
ASFLAGS-.os += -Wa,-Av9d
ASFLAGS-.op += -Wa,-Av9d
ASFLAGS-.oS += -Wa,-Av9d

# nscd uses atomic_spin_nop which in turn requires cpu_relax
ifeq ($(subdir),nscd)
routines += cpu_relax
endif

ifeq ($(subdir), nptl)
libpthread-routines += cpu_relax
endif
@ -1,108 +0,0 @@
/* Atomic operations. sparcv9 version.
Copyright (C) 2003-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */

#include <stdint.h>

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 0


#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
(abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
(abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ \
__typeof (*(mem)) __acev_tmp; \
__typeof (mem) __acev_mem = (mem); \
if (__builtin_constant_p (oldval) && (oldval) == 0) \
__asm __volatile ("cas [%3], %%g0, %0" \
: "=r" (__acev_tmp), "=m" (*__acev_mem) \
: "m" (*__acev_mem), "r" (__acev_mem), \
"0" (newval) : "memory"); \
else \
__asm __volatile ("cas [%4], %2, %0" \
: "=r" (__acev_tmp), "=m" (*__acev_mem) \
: "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \
"0" (newval) : "memory"); \
__acev_tmp; })

/* This can be implemented if needed. */
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
(abort (), (__typeof (*mem)) 0)

#define atomic_exchange_acq(mem, newvalue) \
({ __typeof (*(mem)) __oldval; \
__typeof (mem) __memp = (mem); \
__typeof (*(mem)) __value = (newvalue); \
\
if (sizeof (*(mem)) == 4) \
__asm ("swap %0, %1" \
: "=m" (*__memp), "=r" (__oldval) \
: "m" (*__memp), "1" (__value) : "memory"); \
else \
abort (); \
__oldval; })

#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \
atomic_compare_and_exchange_val_acq (mem, newval, oldval)

#define atomic_exchange_24_rel(mem, newval) \
atomic_exchange_rel (mem, newval)

#define atomic_full_barrier() \
__asm __volatile ("membar #LoadLoad | #LoadStore" \
" | #StoreLoad | #StoreStore" : : : "memory")
#define atomic_read_barrier() \
__asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory")
#define atomic_write_barrier() \
__asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory")

extern void __cpu_relax (void);
#define atomic_spin_nop() __cpu_relax ()
@ -1 +0,0 @@
#include <sysdeps/sparc/sparc64/cpu_relax.c>
@ -29,15 +29,6 @@ ASFLAGS-.os += -Wa,-Av9d
ASFLAGS-.op += -Wa,-Av9d
ASFLAGS-.oS += -Wa,-Av9d

# nscd uses atomic_spin_nop which in turn requires cpu_relax
ifeq ($(subdir),nscd)
routines += cpu_relax
endif

ifeq ($(subdir),nptl)
libpthread-routines += cpu_relax
endif

ifeq ($(subdir),soft-fp)
sparc64-quad-routines := qp_add qp_cmp qp_cmpe qp_div qp_dtoq qp_feq qp_fge \
qp_fgt qp_fle qp_flt qp_fne qp_itoq qp_mul qp_neg qp_qtod qp_qtoi \
@ -1,129 +0,0 @@
/* Atomic operations. sparc64 version.
Copyright (C) 2003-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */

#include <stdint.h>

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1


#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
(abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
(abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ \
__typeof (*(mem)) __acev_tmp; \
__typeof (mem) __acev_mem = (mem); \
if (__builtin_constant_p (oldval) && (oldval) == 0) \
__asm __volatile ("cas [%3], %%g0, %0" \
: "=r" (__acev_tmp), "=m" (*__acev_mem) \
: "m" (*__acev_mem), "r" (__acev_mem), \
"0" (newval) : "memory"); \
else \
__asm __volatile ("cas [%4], %2, %0" \
: "=r" (__acev_tmp), "=m" (*__acev_mem) \
: "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \
"0" (newval) : "memory"); \
__acev_tmp; })

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ \
__typeof (*(mem)) __acev_tmp; \
__typeof (mem) __acev_mem = (mem); \
if (__builtin_constant_p (oldval) && (oldval) == 0) \
__asm __volatile ("casx [%3], %%g0, %0" \
: "=r" (__acev_tmp), "=m" (*__acev_mem) \
: "m" (*__acev_mem), "r" (__acev_mem), \
"0" ((long) (newval)) : "memory"); \
else \
__asm __volatile ("casx [%4], %2, %0" \
: "=r" (__acev_tmp), "=m" (*__acev_mem) \
: "r" ((long) (oldval)), "m" (*__acev_mem), \
"r" (__acev_mem), "0" ((long) (newval)) : "memory"); \
__acev_tmp; })

#define atomic_exchange_acq(mem, newvalue) \
({ __typeof (*(mem)) __oldval, __val; \
__typeof (mem) __memp = (mem); \
__typeof (*(mem)) __value = (newvalue); \
\
if (sizeof (*(mem)) == 4) \
__asm ("swap %0, %1" \
: "=m" (*__memp), "=r" (__oldval) \
: "m" (*__memp), "1" (__value) : "memory"); \
else \
{ \
__val = *__memp; \
do \
{ \
__oldval = __val; \
__val = atomic_compare_and_exchange_val_acq (__memp, __value, \
__oldval); \
} \
while (__builtin_expect (__val != __oldval, 0)); \
} \
__oldval; })

#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \
atomic_compare_and_exchange_val_acq (mem, newval, oldval)

#define atomic_exchange_24_rel(mem, newval) \
atomic_exchange_rel (mem, newval)

#define atomic_full_barrier() \
__asm __volatile ("membar #LoadLoad | #LoadStore" \
" | #StoreLoad | #StoreStore" : : : "memory")
#define atomic_read_barrier() \
__asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory")
#define atomic_write_barrier() \
__asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory")

extern void __cpu_relax (void);
#define atomic_spin_nop() __cpu_relax ()
@ -1,130 +0,0 @@
/* Copyright (C) 2003-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.

The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */

#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <kernel-features.h>
#include <errno.h>

#include <lowlevellock-futex.h>

static inline int
__attribute__ ((always_inline))
__lll_trylock (int *futex)
{
return atomic_compare_and_exchange_val_24_acq (futex, 1, 0) != 0;
}
#define lll_trylock(futex) __lll_trylock (&(futex))

static inline int
__attribute__ ((always_inline))
__lll_cond_trylock (int *futex)
{
return atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0;
}
#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))


extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;

static inline void
__attribute__ ((always_inline))
__lll_lock (int *futex, int private)
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);

if (__glibc_unlikely (val != 0))
{
if (__builtin_constant_p (private) && private == LLL_PRIVATE)
__lll_lock_wait_private (futex);
else
__lll_lock_wait (futex, private);
}
}
#define lll_lock(futex, private) __lll_lock (&(futex), private)

static inline void
__attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0);

if (__glibc_unlikely (val != 0))
__lll_lock_wait (futex, private);
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)


extern int __lll_clocklock_wait (int *futex, clockid_t, int val,
const struct timespec *,
int private) attribute_hidden;

#define lll_timedwait(futex, val, clockid, abstime, private) \
__lll_clocklock_wait (futex, val, clockid, abstime, private)

static inline int
__attribute__ ((always_inline))
__lll_clocklock (int *futex, clockid_t clockid,
const struct timespec *abstime, int private)
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
int result = 0;

if (__glibc_unlikely (val != 0))
{
do
{
int oldval = atomic_compare_and_exchange_val_24_acq (futex, val, 1);
if (oldval != 0)
{
result = __lll_clocklock_wait (futex, 2, clockid, abstime,
private);
if (result == EINVAL || result == ETIMEDOUT)
break;
}
}
while (atomic_compare_and_exchange_val_24_acq (futex, val, 0) != 0);
}
return result;
}
#define lll_clocklock(futex, clockid, abstime, private) \
__lll_clocklock (&(futex), clockid, abstime, private)

#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __private = (private); \
int __val = atomic_exchange_24_rel (__futex, 0); \
if (__glibc_unlikely (__val > 1)) \
lll_futex_wake (__futex, 1, __private); \
}))

#define lll_islocked(futex) \
(futex != 0)

/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)

#endif /* lowlevellock.h */
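The header removed above implements the usual three-state futex lock on top of the 24-bit atomics: 0 = unlocked, 1 = locked with no waiters, 2 = locked with waiters, and only state 2 pays for a futex wake on unlock. With this file gone, the sparc build falls back to the generic lowlevellock.h, which expresses the same protocol through the regular atomic macros (now compiler builtins). A compact sketch of the protocol (editor's illustration, not glibc code):

/* Editor's sketch of the 0/1/2 futex lock protocol.  */
static void
lll_lock_sketch (int *futex, int private)
{
  int expected = 0;
  if (__atomic_compare_exchange_n (futex, &expected, 1, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;                                /* Fast path: lock was free.  */
  /* Contended: advertise a waiter (state 2), then sleep until the owner
     releases the lock, retrying the exchange each time we wake up.  */
  while (__atomic_exchange_n (futex, 2, __ATOMIC_ACQUIRE) != 0)
    lll_futex_wait (futex, 2, private);
}

static void
lll_unlock_sketch (int *futex, int private)
{
  if (__atomic_exchange_n (futex, 0, __ATOMIC_RELEASE) > 1)
    lll_futex_wake (futex, 1, private);    /* Someone is blocked; wake one.  */
}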