
ARC: Reduce bitops lines of code using macros

No semantic changes!

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Author: Vineet Gupta <vgupta@synopsys.com>
Date:   2015-03-31 22:38:21 +05:30
parent  b8a0330239
commit  04e2eee4b0


@@ -18,83 +18,50 @@
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <asm/barrier.h>
-#ifndef CONFIG_ARC_HAS_LLSC
-#include <asm/smp.h>
-#endif
 
-/*
- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
- * The Kconfig glue ensures that in SMP, this is only set if the container
- * SoC/platform has cross-core coherent LLOCK/SCOND
- */
 #if defined(CONFIG_ARC_HAS_LLSC)
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned int temp;
-
-        m += nr >> 5;
-
-        /*
-         * ARC ISA micro-optimization:
-         *
-         * Instructions dealing with bitpos only consider lower 5 bits (0-31)
-         * e.g (x << 33) is handled like (x << 1) by ASL instruction
-         *  (mem pointer still needs adjustment to point to next word)
-         *
-         * Hence the masking to clamp @nr arg can be elided in general.
-         *
-         * However if @nr is a constant (above assumed it in a register),
-         * and greater than 31, gcc can optimize away (x << 33) to 0,
-         * as overflow, given the 32-bit ISA. Thus masking needs to be done
-         * for constant @nr, but no code is generated due to const prop.
-         */
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        __asm__ __volatile__(
-        "1:     llock   %0, [%1]        \n"
-        "       bset    %0, %0, %2      \n"
-        "       scond   %0, [%1]        \n"
-        "       bnz     1b              \n"
-        : "=&r"(temp)
-        : "r"(m), "ir"(nr)
-        : "cc");
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned int temp;
-
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        __asm__ __volatile__(
-        "1:     llock   %0, [%1]        \n"
-        "       bclr    %0, %0, %2      \n"
-        "       scond   %0, [%1]        \n"
-        "       bnz     1b              \n"
-        : "=&r"(temp)
-        : "r"(m), "ir"(nr)
-        : "cc");
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned int temp;
-
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        __asm__ __volatile__(
-        "1:     llock   %0, [%1]        \n"
-        "       bxor    %0, %0, %2      \n"
-        "       scond   %0, [%1]        \n"
-        "       bnz     1b              \n"
-        : "=&r"(temp)
-        : "r"(m), "ir"(nr)
-        : "cc");
-}
+/*
+ * Hardware assisted Atomic-R-M-W
+ */
+
+#define BIT_OP(op, c_op, asm_op)                                        \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                       \
+        unsigned int temp;                                              \
+                                                                        \
+        m += nr >> 5;                                                   \
+                                                                        \
+        /*                                                              \
+         * ARC ISA micro-optimization:                                  \
+         *                                                              \
+         * Instructions dealing with bitpos only consider lower 5 bits  \
+         * e.g (x << 33) is handled like (x << 1) by ASL instruction    \
+         *  (mem pointer still needs adjustment to point to next word)  \
+         *                                                              \
+         * Hence the masking to clamp @nr arg can be elided in general. \
+         *                                                              \
+         * However if @nr is a constant (above assumed in a register),  \
+         * and greater than 31, gcc can optimize away (x << 33) to 0,   \
+         * as overflow, given the 32-bit ISA. Thus masking needs to be  \
+         * done for const @nr, but no code is generated due to gcc      \
+         * const prop.                                                  \
+         */                                                             \
+        if (__builtin_constant_p(nr))                                   \
+                nr &= 0x1f;                                             \
+                                                                        \
+        __asm__ __volatile__(                                           \
+        "1:     llock       %0, [%1]    \n"                             \
+        "       " #asm_op " %0, %0, %2  \n"                             \
+        "       scond       %0, [%1]    \n"                             \
+        "       bnz         1b          \n"                             \
+        : "=&r"(temp)   /* Early clobber, to prevent reg reuse */       \
+        : "r"(m),       /* Not "m": llock only supports reg direct addr mode */ \
+          "ir"(nr)                                                      \
+        : "cc");                                                        \
+}
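For reference, expanding BIT_OP(set, |, bset) by hand in the LLSC case gives a set_bit() that is token-for-token equivalent to the open-coded routine deleted above; this is what "no semantic changes" means in practice. The expansion below is illustrative only and is not part of the patch.

/* Hand expansion of BIT_OP(set, |, bset) with CONFIG_ARC_HAS_LLSC=y.
 * op##_bit pastes to set_bit and #asm_op stringizes to "bset", which the
 * adjacent string literals splice into the asm template.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
        unsigned int temp;

        m += nr >> 5;                   /* step to the 32-bit word holding bit @nr */

        if (__builtin_constant_p(nr))   /* clamp const @nr; a reg @nr needs no mask */
                nr &= 0x1f;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"     /* load-locked */
        "       bset    %0, %0, %2      \n"     /* "bset" came from #asm_op */
        "       scond   %0, [%1]        \n"     /* store-conditional */
        "       bnz     1b              \n"     /* retry if the reservation was lost */
        : "=&r"(temp)
        : "r"(m), "ir"(nr)
        : "cc");
}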
@@ -108,91 +75,38 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *m)
 /*
  * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
  * and the old value of bit is returned
  */
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long old, temp;
-
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        /*
-         * Explicit full memory barrier needed before/after as
-         * LLOCK/SCOND themselves don't provide any such semantics
-         */
-        smp_mb();
-
-        __asm__ __volatile__(
-        "1:     llock   %0, [%2]        \n"
-        "       bset    %1, %0, %3      \n"
-        "       scond   %1, [%2]        \n"
-        "       bnz     1b              \n"
-        : "=&r"(old), "=&r"(temp)
-        : "r"(m), "ir"(nr)
-        : "cc");
-
-        smp_mb();
-
-        return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned int old, temp;
-
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        smp_mb();
-
-        __asm__ __volatile__(
-        "1:     llock   %0, [%2]        \n"
-        "       bclr    %1, %0, %3      \n"
-        "       scond   %1, [%2]        \n"
-        "       bnz     1b              \n"
-        : "=&r"(old), "=&r"(temp)
-        : "r"(m), "ir"(nr)
-        : "cc");
-
-        smp_mb();
-
-        return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned int old, temp;
-
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        smp_mb();
-
-        __asm__ __volatile__(
-        "1:     llock   %0, [%2]        \n"
-        "       bxor    %1, %0, %3      \n"
-        "       scond   %1, [%2]        \n"
-        "       bnz     1b              \n"
-        : "=&r"(old), "=&r"(temp)
-        : "r"(m), "ir"(nr)
-        : "cc");
-
-        smp_mb();
-
-        return (old & (1 << nr)) != 0;
-}
+#define TEST_N_BIT_OP(op, c_op, asm_op)                                 \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                       \
+        unsigned long old, temp;                                        \
+                                                                        \
+        m += nr >> 5;                                                   \
+                                                                        \
+        if (__builtin_constant_p(nr))                                   \
+                nr &= 0x1f;                                             \
+                                                                        \
+        /*                                                              \
+         * Explicit full memory barrier needed before/after as          \
+         * LLOCK/SCOND themselves don't provide any such semantics      \
+         */                                                             \
+        smp_mb();                                                       \
+                                                                        \
+        __asm__ __volatile__(                                           \
+        "1:     llock       %0, [%2]    \n"                             \
+        "       " #asm_op " %1, %0, %3  \n"                             \
+        "       scond       %1, [%2]    \n"                             \
+        "       bnz         1b          \n"                             \
+        : "=&r"(old), "=&r"(temp)                                       \
+        : "r"(m), "ir"(nr)                                              \
+        : "cc");                                                        \
+                                                                        \
+        smp_mb();                                                       \
+                                                                        \
+        return (old & (1 << nr)) != 0;                                  \
+}
 
 #else   /* !CONFIG_ARC_HAS_LLSC */
 
+#include <asm/smp.h>
+
 /*
  * Non hardware assisted Atomic-R-M-W
  * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
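The generated test_and_*_bit() variants keep an explicit smp_mb() on each side of the LLOCK/SCOND loop because the loop only guarantees atomicity, not ordering; callers treat these routines as full barriers. A typical caller looks like the sketch below; busy_map, try_claim() and release_claim() are made-up names for illustration, not part of this patch or of the kernel API.

/* Hypothetical usage sketch: claim bit 0 of a shared word as a busy flag.
 * test_and_set_bit() returns the previous value of the bit, so a zero
 * return means this caller won the claim; the barriers inside order the
 * claim against the critical work that follows.
 */
static unsigned long busy_map[1];       /* made-up shared state */

static int try_claim(void)
{
        if (test_and_set_bit(0, busy_map))
                return 0;               /* bit already set: claim lost */
        return 1;                       /* bit was clear and is now set: claim won */
}

static void release_claim(void)
{
        clear_bit(0, busy_map);         /* atomic clear; pairs with the claim above */
}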
@@ -209,111 +123,43 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  * at compile time)
  */
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long temp, flags;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        bitops_lock(flags);
-
-        temp = *m;
-        *m = temp | (1UL << nr);
-
-        bitops_unlock(flags);
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long temp, flags;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        bitops_lock(flags);
-
-        temp = *m;
-        *m = temp & ~(1UL << nr);
-
-        bitops_unlock(flags);
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long temp, flags;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        bitops_lock(flags);
-
-        temp = *m;
-        *m = temp ^ (1UL << nr);
-
-        bitops_unlock(flags);
-}
-
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long old, flags;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        /*
-         * spin lock/unlock provide the needed smp_mb() before/after
-         */
-        bitops_lock(flags);
-
-        old = *m;
-        *m = old | (1 << nr);
-
-        bitops_unlock(flags);
-
-        return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long old, flags;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        bitops_lock(flags);
-
-        old = *m;
-        *m = old & ~(1 << nr);
-
-        bitops_unlock(flags);
-
-        return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long old, flags;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        bitops_lock(flags);
-
-        old = *m;
-        *m = old ^ (1 << nr);
-
-        bitops_unlock(flags);
-
-        return (old & (1 << nr)) != 0;
-}
+#define BIT_OP(op, c_op, asm_op)                                        \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                       \
+        unsigned long temp, flags;                                      \
+        m += nr >> 5;                                                   \
+                                                                        \
+        if (__builtin_constant_p(nr))                                   \
+                nr &= 0x1f;                                             \
+                                                                        \
+        /*                                                              \
+         * spin lock/unlock provide the needed smp_mb() before/after    \
+         */                                                             \
+        bitops_lock(flags);                                             \
+                                                                        \
+        temp = *m;                                                      \
+        *m = temp c_op (1UL << nr);                                     \
+                                                                        \
+        bitops_unlock(flags);                                           \
+}
+
+#define TEST_N_BIT_OP(op, c_op, asm_op)                                 \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                       \
+        unsigned long old, flags;                                       \
+        m += nr >> 5;                                                   \
+                                                                        \
+        if (__builtin_constant_p(nr))                                   \
+                nr &= 0x1f;                                             \
+                                                                        \
+        bitops_lock(flags);                                             \
+                                                                        \
+        old = *m;                                                       \
+        *m = old c_op (1 << nr);                                        \
+                                                                        \
+        bitops_unlock(flags);                                           \
+                                                                        \
+        return (old & (1 << nr)) != 0;                                  \
+}
 
 #endif /* CONFIG_ARC_HAS_LLSC */
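In the !LLSC generators the c_op argument is spliced in as raw tokens, which is why a single parameter can carry both the one-token "|" / "^" and the two-token "& ~". Expanding BIT_OP(clear, & ~, bclr) for this path by hand (illustration only; asm_op is simply unused here) gives:

/* Hand expansion of BIT_OP(clear, & ~, bclr) with CONFIG_ARC_HAS_LLSC unset. */
static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
        unsigned long temp, flags;
        m += nr >> 5;

        if (__builtin_constant_p(nr))
                nr &= 0x1f;

        /*
         * spin lock/unlock provide the needed smp_mb() before/after
         */
        bitops_lock(flags);

        temp = *m;
        *m = temp & ~(1UL << nr);       /* "temp c_op (1UL << nr)" with c_op = & ~ */

        bitops_unlock(flags);
}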
@@ -322,86 +168,51 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  * Non atomic variants
  **************************************/
 
-static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long temp;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        temp = *m;
-        *m = temp | (1UL << nr);
-}
-
-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long temp;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        temp = *m;
-        *m = temp & ~(1UL << nr);
-}
-
-static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long temp;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        temp = *m;
-        *m = temp ^ (1UL << nr);
-}
-
-static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long old;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        old = *m;
-        *m = old | (1 << nr);
-
-        return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long old;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        old = *m;
-        *m = old & ~(1 << nr);
-
-        return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-        unsigned long old;
-        m += nr >> 5;
-
-        if (__builtin_constant_p(nr))
-                nr &= 0x1f;
-
-        old = *m;
-        *m = old ^ (1 << nr);
-
-        return (old & (1 << nr)) != 0;
-}
+#define __BIT_OP(op, c_op, asm_op)                                      \
+static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)   \
+{                                                                       \
+        unsigned long temp;                                             \
+        m += nr >> 5;                                                   \
+                                                                        \
+        if (__builtin_constant_p(nr))                                   \
+                nr &= 0x1f;                                             \
+                                                                        \
+        temp = *m;                                                      \
+        *m = temp c_op (1UL << nr);                                     \
+}
+
+#define __TEST_N_BIT_OP(op, c_op, asm_op)                               \
+static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                       \
+        unsigned long old;                                              \
+        m += nr >> 5;                                                   \
+                                                                        \
+        if (__builtin_constant_p(nr))                                   \
+                nr &= 0x1f;                                             \
+                                                                        \
+        old = *m;                                                       \
+        *m = old c_op (1 << nr);                                        \
+                                                                        \
+        return (old & (1 << nr)) != 0;                                  \
+}
+
+#define BIT_OPS(op, c_op, asm_op)                                       \
+                                                                        \
+        /* set_bit(), clear_bit(), change_bit() */                      \
+        BIT_OP(op, c_op, asm_op)                                        \
+                                                                        \
+        /* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
+        TEST_N_BIT_OP(op, c_op, asm_op)                                 \
+                                                                        \
+        /* __set_bit(), __clear_bit(), __change_bit() */                \
+        __BIT_OP(op, c_op, asm_op)                                      \
+                                                                        \
+        /* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
+        __TEST_N_BIT_OP(op, c_op, asm_op)
+
+BIT_OPS(set, |, bset)
+BIT_OPS(clear, & ~, bclr)
+BIT_OPS(change, ^, bxor)
 
 /*
  * This routine doesn't need to be atomic.
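The generator pattern itself is plain C preprocessor and can be exercised outside the kernel. The self-contained sketch below is a hypothetical user-space illustration, not kernel or ARC code; it reproduces only the non-atomic shape, and it masks @nr with 0x1f unconditionally for portability since nothing outside ARC clamps shift counts for us. It shows how one BIT_OPS() line expands into a whole family of helpers.

/* Standalone illustration of the BIT_OPS() generation pattern (user-space,
 * non-atomic only; the names mirror the kernel macros for readability).
 */
#include <assert.h>

#define __BIT_OP(op, c_op)                                              \
static inline void __##op##_bit(unsigned long nr, unsigned long *m)     \
{                                                                       \
        m += nr >> 5;                                                   \
        *m = *m c_op (1UL << (nr & 0x1f));                              \
}

#define __TEST_N_BIT_OP(op, c_op)                                       \
static inline int __test_and_##op##_bit(unsigned long nr, unsigned long *m)\
{                                                                       \
        unsigned long old;                                              \
        m += nr >> 5;                                                   \
        old = *m;                                                       \
        *m = old c_op (1UL << (nr & 0x1f));                             \
        return (old & (1UL << (nr & 0x1f))) != 0;                       \
}

#define BIT_OPS(op, c_op)                                               \
        __BIT_OP(op, c_op)                                              \
        __TEST_N_BIT_OP(op, c_op)

BIT_OPS(set, |)
BIT_OPS(clear, & ~)
BIT_OPS(change, ^)

int main(void)
{
        unsigned long map[2] = { 0, 0 };
        int was_set;

        __set_bit(33, map);                     /* word 1, bit 1 */
        assert(map[1] == 2);

        was_set = __test_and_clear_bit(33, map);
        assert(was_set == 1 && map[1] == 0);

        __change_bit(0, map);                   /* toggle word 0, bit 0 */
        assert(map[0] == 1);
        return 0;
}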