
ARC: add barriers to futex code

The atomic ops on futex need to provide the full barrier just like the
regular atomics in the kernel.
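For reference, ARC's regular LLOCK/SCOND atomics already follow this
convention: an explicit smp_mb() on both sides of the retry loop, since
LLOCK/SCOND themselves imply no ordering. A minimal sketch of that
pattern (illustrative only; the helper name is made up and this is not
the kernel's exact code):

/* sketch, assuming CONFIG_ARC_HAS_LLSC: the full-barrier convention */
static inline int sketch_atomic_add_return(int i, int *counter)
{
	int val;

	smp_mb();	/* order all prior accesses before the atomic op */

	__asm__ __volatile__(
	"1:	llock	%0, [%1]	\n"	/* load-locked old value */
	"	add	%0, %0, %2	\n"
	"	scond	%0, [%1]	\n"	/* store-conditional new value */
	"	bnz	1b		\n"	/* retry if reservation was lost */
	: "=&r" (val)
	: "r" (counter), "ir" (i)
	: "cc", "memory");

	smp_mb();	/* order the atomic op before all later accesses */

	return val;
}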

Also remove pagefault_enable/disable in futex_atomic_cmpxchg_inatomic(),
as the core code already does that.
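This is safe because the generic futex code brackets the arch helper
itself; roughly (paraphrasing kernel/futex.c of this era):

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();	/* core code disables pagefaults ... */
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();	/* ... and re-enables them afterwards */

	return ret;
}

so doing it again inside the arch helper was redundant nesting, not
extra protection.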

Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Michel Lespinasse <walken@google.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
commit 31d30c8208
parent 1648c70d30
Author: Vineet Gupta
Date:   2015-08-05 19:10:02 +05:30

--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h

@@ -20,6 +20,7 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
 							\
+	smp_mb();					\
 	__asm__ __volatile__(				\
 	"1:	llock	%1, [%2]		\n"	\
 		insn				"\n"	\
@@ -40,12 +41,14 @@
 							\
 	: "=&r" (ret), "=&r" (oldval)			\
 	: "r" (uaddr), "r" (oparg), "ir" (-EFAULT)	\
-	: "cc", "memory")
+	: "cc", "memory");				\
+	smp_mb()					\
 
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
 							\
+	smp_mb();					\
 	__asm__ __volatile__(				\
 	"1:	ld	%1, [%2]		\n"	\
 		insn				"\n"	\
@@ -65,7 +68,8 @@
 							\
 	: "=&r" (ret), "=&r" (oldval)			\
 	: "r" (uaddr), "r" (oparg), "ir" (-EFAULT)	\
-	: "cc", "memory")
+	: "cc", "memory");				\
+	smp_mb()					\
 
 #endif
 
@@ -134,13 +138,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-/* Compare-xchg with pagefaults disabled.
- *  Notes:
- *      -Best-Effort: Exchg happens only if compare succeeds.
- *          If compare fails, returns; leaving retry/looping to upper layers
- *      -successful cmp-xchg: return orig value in @addr (same as cmp val)
- *      -Compare fails: return orig value in @addr
- *      -user access r/w fails: return -EFAULT
+/*
+ * cmpxchg of futex (pagefaults disabled by caller)
  */
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
@@ -151,7 +150,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	pagefault_disable();
+	smp_mb();
 
 	__asm__ __volatile__(
 #ifdef CONFIG_ARC_HAS_LLSC
@@ -178,7 +177,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
 	: "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
 	: "cc", "memory");
 
-	pagefault_enable();
+	smp_mb();
 
 	*uval = val;
 	return val;