Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-23 04:34:11 +08:00
8dc39b883e
atomic.h, bitops.h and mmu_context.h are using likely/unlikely. thread_info.h uses __attribute_const__. Hence these files require linux/compiler.h to be included. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
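For context: likely()/unlikely() and __attribute_const__ are provided by linux/compiler.h, so a header that uses them must include it explicitly rather than rely on an indirect include. The use in this particular file is in the pre-ARMv6 atomic_cmpxchg() shown below:

    if (likely(ret == old))
            v->counter = new;

The thread_info.h case refers to a declaration annotated with __attribute_const__ (e.g. a current_thread_info()-style accessor); that file is not shown here.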
210 lines · 4.4 KiB · C
/*
 * linux/include/asm-arm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/config.h>
#include <linux/compiler.h>

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

#define atomic_read(v)  ((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens. Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
        unsigned long tmp;

        __asm__ __volatile__("@ atomic_set\n"
"1:     ldrex   %0, [%1]\n"
"       strex   %0, %2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
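
/*
 * Note on the constraints used in the asm blocks in this file: "=&r" is an
 * early-clobber output register (written before all inputs are consumed),
 * "r" passes the operand in a register, "Ir" accepts either an ARM
 * data-processing immediate or a register, and the "cc" clobber tells the
 * compiler that the condition flags are modified (by teq).
 */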

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%2]\n"
"       add     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_sub_return\n"
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        u32 oldval, res;

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%2]\n"
                "mov    %0, #0\n"
                "teq    %1, %3\n"
                "strexeq %0, %4, [%2]\n"
                    : "=&r" (res), "=&r" (oldval)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        return oldval;
}
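
/*
 * In the sequence above, strexeq only attempts the store when the loaded
 * value matched 'old'; the do/while re-runs the sequence whenever the
 * exclusive store fails (res != 0), and the value observed by ldrex is
 * returned either way.
 */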

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__("@ atomic_clear_mask\n"
"1:     ldrex   %0, %2\n"
"       bic     %0, %0, %3\n"
"       strex   %1, %0, %2\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (addr), "Ir" (mask)
        : "cc");
}

#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
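
/*
 * Pre-ARMv6 CPUs are uniprocessor-only here (see the #error above), so the
 * only concurrency these ops must guard against is interrupts; disabling
 * IRQs around the read-modify-write sequence is sufficient.
 */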

#define atomic_set(v,i) (((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        local_irq_save(flags);
        val = v->counter;
        v->counter = val += i;
        local_irq_restore(flags);

        return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        local_irq_save(flags);
        val = v->counter;
        v->counter = val -= i;
        local_irq_restore(flags);

        return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        local_irq_restore(flags);

        return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        local_irq_save(flags);
        *addr &= ~mask;
        local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_add(i, v)        (void) atomic_add_return(i, v)
#define atomic_inc(v)           (void) atomic_add_return(1, v)
#define atomic_sub(i, v)        (void) atomic_sub_return(i, v)
#define atomic_dec(v)           (void) atomic_sub_return(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
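
/*
 * Illustrative usage only (not part of this header; the names below are
 * made up): a minimal reference count built on these operations.
 *
 *      static atomic_t foo_count = ATOMIC_INIT(1);
 *
 *      static void foo_get(void)
 *      {
 *              atomic_inc(&foo_count);
 *      }
 *
 *      static void foo_put(void)
 *      {
 *              if (atomic_dec_and_test(&foo_count))
 *                      foo_free();
 *      }
 */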

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif
#endif