locking/atomic: Move ATOMIC_INIT into linux/types.h

This patch moves ATOMIC_INIT from asm/atomic.h into linux/types.h.
This allows users of atomic_t to use ATOMIC_INIT without having to
include atomic.h, since doing so may lead to header loops.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Link: https://lkml.kernel.org/r/20200729123105.GB7047@gondor.apana.org.au
commit 7ca8cf5347
parent e885d5d947
Author:    Herbert Xu, 2020-07-29 22:31:05 +10:00 (committed by Peter Zijlstra)
20 changed files with 2 additions and 34 deletions
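For illustration, a minimal sketch of the usage this enables, with hypothetical names (example_stats.h, struct example_stats and EXAMPLE_STATS_INIT are not part of the patch): a header that needs only the atomic_t type and its static initializer can stick to <linux/types.h> instead of risking an include loop through atomic.h.

	/* example_stats.h - hypothetical header, for illustration only */
	#include <linux/types.h>	/* atomic_t and, after this patch, ATOMIC_INIT */

	struct example_stats {
		atomic_t refs;		/* statically initialized via EXAMPLE_STATS_INIT below */
	};

	/* The initializer no longer drags in <linux/atomic.h>. */
	#define EXAMPLE_STATS_INIT	{ .refs = ATOMIC_INIT(1) }

Run-time operations (atomic_read(), atomic_inc(), ...) still come from <linux/atomic.h>; only the initializer macro moves.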

@@ -24,7 +24,6 @@
 #define __atomic_acquire_fence()
 #define __atomic_post_full_fence()
-#define ATOMIC_INIT(i) { (i) }
 #define ATOMIC64_INIT(i) { (i) }
 #define atomic_read(v) READ_ONCE((v)->counter)

@@ -14,8 +14,6 @@
 #include <asm/barrier.h>
 #include <asm/smp.h>
-#define ATOMIC_INIT(i) { (i) }
 #ifndef CONFIG_ARC_PLAT_EZNPS
 #define atomic_read(v) READ_ONCE((v)->counter)

@@ -15,8 +15,6 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
-#define ATOMIC_INIT(i) { (i) }
 #ifdef __KERNEL__
 /*

@@ -99,8 +99,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 	return __lse_ll_sc_body(atomic64_dec_if_positive, v);
 }
-#define ATOMIC_INIT(i) { (i) }
 #define arch_atomic_read(v) __READ_ONCE((v)->counter)
 #define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))

@@ -12,8 +12,6 @@
  * resource counting etc..
  */
-#define ATOMIC_INIT(i) { (i) }
 #define atomic_read(v) READ_ONCE((v)->counter)
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

@@ -12,8 +12,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
 /* Normal writes in our arch don't clear lock reservations */
 static inline void atomic_set(atomic_t *v, int new)

@@ -19,7 +19,6 @@
 #include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
 #define ATOMIC64_INIT(i) { (i) }
 #define atomic_read(v) READ_ONCE((v)->counter)

@@ -16,8 +16,6 @@
  * We do not have SMP m68k systems, so we don't have to deal with that.
  */
-#define ATOMIC_INIT(i) { (i) }
 #define atomic_read(v) READ_ONCE((v)->counter)
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

@@ -45,7 +45,6 @@ static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
 	return xchg(&v->counter, n); \
 }
-#define ATOMIC_INIT(i) { (i) }
 ATOMIC_OPS(atomic, int)
 #ifdef CONFIG_64BIT

@@ -136,8 +136,6 @@ ATOMIC_OPS(xor, ^=)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
-#define ATOMIC_INIT(i) { (i) }
 #ifdef CONFIG_64BIT
 #define ATOMIC64_INIT(i) { (i) }

@@ -11,8 +11,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
 /*
  * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
  * a "bne-" instruction at the end, so an isync is enough as a acquire barrier

@@ -19,8 +19,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
 #define __atomic_acquire_fence() \
 	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

@@ -15,8 +15,6 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
-#define ATOMIC_INIT(i) { (i) }
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;

@@ -19,8 +19,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
 #define atomic_read(v) READ_ONCE((v)->counter)
 #define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))

@@ -18,8 +18,6 @@
 #include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
-#define ATOMIC_INIT(i) { (i) }
 int atomic_add_return(int, atomic_t *);
 int atomic_fetch_add(int, atomic_t *);
 int atomic_fetch_and(int, atomic_t *);

@@ -12,7 +12,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
 #define ATOMIC64_INIT(i) { (i) }
 #define atomic_read(v) READ_ONCE((v)->counter)

@@ -14,8 +14,6 @@
  * resource counting etc..
  */
-#define ATOMIC_INIT(i) { (i) }
 /**
  * arch_atomic_read - read atomic variable
  * @v: pointer of type atomic_t

@@ -19,8 +19,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
 /*
  * This Xtensa implementation assumes that the right mechanism
  * for exclusion is for locking interrupts to level EXCM_LEVEL.

@@ -159,8 +159,6 @@ ATOMIC_OP(xor, ^)
  * resource counting etc..
  */
-#define ATOMIC_INIT(i) { (i) }
 /**
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t

@@ -167,6 +167,8 @@ typedef struct {
 	int counter;
 } atomic_t;
+#define ATOMIC_INIT(i) { (i) }
 #ifdef CONFIG_64BIT
 typedef struct {
 	s64 counter;
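
With the addition above, the touched region of linux/types.h should read roughly as follows after the patch (the two closing lines are context beyond what the hunk shows):

	typedef struct {
		int counter;
	} atomic_t;

	#define ATOMIC_INIT(i) { (i) }

	#ifdef CONFIG_64BIT
	typedef struct {
		s64 counter;
	} atomic64_t;
	#endif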