Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-28 07:04:00 +08:00
1deab8ce2c
Pull sparc updates from David Miller:

 1) Add missing cmpxchg64() for 32-bit sparc.
 2) Timer conversions from Allen Pais and Kees Cook.
 3) vDSO support, from Nagarathnam Muthusamy.
 4) Fix sparc64 huge page table walks based upon bug report by Al Viro,
    from Nitin Gupta.
 5) Optimized fls() for T4 and above, from Vijay Kumar.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Fix page table walk for PUD hugepages
  sparc64: Convert timers to user timer_setup()
  sparc64: convert mdesc_handle.refcnt from atomic_t to refcount_t
  sparc/led: Convert timers to use timer_setup()
  sparc64: Use sparc optimized fls and __fls for T4 and above
  sparc64: SPARC optimized __fls function
  sparc64: SPARC optimized fls function
  sparc64: Define SPARC default __fls function
  sparc64: Define SPARC default fls function
  vDSO for sparc
  sparc32: Add cmpxchg64().
  sbus: char: Move D7S_MINOR to include/linux/miscdevice.h
  sparc: time: Remove unneeded linux/miscdevice.h include
  sparc64: mmu_context: Add missing include files
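The cmpxchg64() item lands in this very file: sparc32 gains __cmpxchg_u64() below, the spinlock-backed helper behind the generic cmpxchg64() wrapper. As a hedged sketch of how a caller typically uses it (the helper add_to_u64() and its retry loop are illustrative, not taken from the patch):

#include <linux/types.h>
#include <asm/cmpxchg.h>

/* Illustrative only: add delta to *p without taking a lock of our own.
 * If a racing update (or, on sparc32, a torn unlocked 64-bit read)
 * makes our sample stale, cmpxchg64() fails and we simply retry. */
static u64 add_to_u64(u64 *p, u64 delta)
{
        u64 old, new;

        do {
                old = *p;
                new = old + delta;
        } while (cmpxchg64(p, old, new) != old);

        return new;
}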
203 lines · 4.3 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE        4
#define ATOMIC_HASH(a)  (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
        [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE        1
#define ATOMIC_HASH(a)          (&dummy)

#endif /* SMP */
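
/*
 * All of the helpers below serialize through one of these hashed
 * spinlocks.  ATOMIC_HASH() keys on the address of the atomic_t, so
 * independent variables usually map to different locks; only objects
 * within the same 256-byte region (the ">>8") share one.  For example,
 * an atomic_t at 0x12345678 hashes to (0x12345678 >> 8) & 3 == 2,
 * i.e. __atomic_hash[2].  On UP a single dummy lock is enough, since
 * spin_lock_irqsave() still disables local interrupts, which is all
 * the serialization a single CPU needs.
 */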

#define ATOMIC_FETCH_OP(op, c_op)                                       \
int atomic_fetch_##op(int i, atomic_t *v)                               \
{                                                                       \
        int ret;                                                        \
        unsigned long flags;                                            \
        spin_lock_irqsave(ATOMIC_HASH(v), flags);                       \
                                                                        \
        ret = v->counter;                                               \
        v->counter c_op i;                                              \
                                                                        \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);                  \
        return ret;                                                     \
}                                                                       \
EXPORT_SYMBOL(atomic_fetch_##op);

#define ATOMIC_OP_RETURN(op, c_op)                                      \
int atomic_##op##_return(int i, atomic_t *v)                            \
{                                                                       \
        int ret;                                                        \
        unsigned long flags;                                            \
        spin_lock_irqsave(ATOMIC_HASH(v), flags);                       \
                                                                        \
        ret = (v->counter c_op i);                                      \
                                                                        \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);                  \
        return ret;                                                     \
}                                                                       \
EXPORT_SYMBOL(atomic_##op##_return);

ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
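
/*
 * For reference, ATOMIC_OP_RETURN(add, +=) above expands to roughly:
 *
 *      int atomic_add_return(int i, atomic_t *v)
 *      {
 *              int ret;
 *              unsigned long flags;
 *              spin_lock_irqsave(ATOMIC_HASH(v), flags);
 *              ret = (v->counter += i);
 *              spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 *              return ret;
 *      }
 *      EXPORT_SYMBOL(atomic_add_return);
 *
 * and each ATOMIC_FETCH_OP() use similarly yields atomic_fetch_add(),
 * atomic_fetch_and(), atomic_fetch_or() and atomic_fetch_xor(), which
 * return the value the counter held *before* the operation.
 */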

int atomic_xchg(atomic_t *v, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        v->counter = new;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(atomic_xchg);

int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;

        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);

int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (ret != u)
                v->counter += a;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);
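
/*
 * Note that __atomic_add_unless() returns the value v held *before*
 * any add.  The generic atomic_add_unless() wrapper turns that into a
 * boolean by comparing the result against u: the add happened iff the
 * returned old value != u.
 */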

/* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i)
{
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        v->counter = i;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);
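
/*
 * Why atomic_set() bothers with the lock: a plain 32-bit store is
 * atomic by itself, but the read-modify-write helpers above are only
 * atomic *under the hash lock*.  An unlocked store could land between
 * the read and the write of a concurrent atomic_add_return() and then
 * be overwritten by its store, losing the set.  Taking the same lock
 * orders atomic_set() against every in-flight RMW on that word.
 */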

unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old | mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old & ~mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old ^ mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(___change_bit);
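
/*
 * The ___set_bit()/___clear_bit()/___change_bit() helpers take a
 * pre-computed mask rather than a bit number, and return old & mask,
 * i.e. nonzero iff the bit was set beforehand -- presumably the shape
 * the sparc32 test_and_*_bit() wrappers want.  They reuse the
 * ATOMIC_HASH locks, so bitops and atomics on the same word serialize
 * against each other.
 */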

unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
        unsigned long flags;
        u32 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        if ((prev = *ptr) == old)
                *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
{
        unsigned long flags;
        u64 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        if ((prev = *ptr) == old)
                *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return prev;
}
EXPORT_SYMBOL(__cmpxchg_u64);
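
/*
 * __cmpxchg_u64() is the sparc32 backend behind the generic
 * cmpxchg64() wrapper added by the "sparc32: Add cmpxchg64()." patch
 * in this pull.  Like everything else in this file it is only atomic
 * with respect to callers that also take the hash lock; the 64-bit
 * compare and store happen entirely under the lock, so the exchange
 * itself cannot tear.
 */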

unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
        unsigned long flags;
        u32 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        prev = *ptr;
        *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);