/*
 * Provenance: mirrored from https://mirrors.bfsu.edu.cn/git/linux.git
 * (linux.git), synced 2024-12-01; commit b4d0d230cc.
 *
 * SPDX relicensing note: based on 1 normalized pattern ("this program is
 * free software ... either version 2 of the licence or at your option any
 * later version"); the SPDX identifier GPL-2.0-or-later was chosen to
 * replace the boilerplate in 114 file(s).
 * Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 * Reviewed-by: Allison Randal <allison@lohutok.net>
 * Reviewed-by: Kate Stewart <kstewart@linuxfoundation.org>
 * Link: https://lkml.kernel.org/r/20190520170857.552531963@linutronix.de
 * Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 */
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* Generic C implementation of atomic counter operations. Usable on
|
|
* UP systems only. Do not include in machine independent code.
|
|
*
|
|
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
*/
|
|
#ifndef __ASM_GENERIC_ATOMIC_H
|
|
#define __ASM_GENERIC_ATOMIC_H
|
|
|
|
#include <asm/cmpxchg.h>
|
|
#include <asm/barrier.h>
|
|
|
|
/*
 * atomic_$op() - $op integer to atomic variable
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
 * smp_mb__{before,after}_atomic().
 */

/*
 * atomic_$op_return() - $op integer to atomic variable and returns the result
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v. Does imply a full memory barrier.
 */
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
/* we can build all atomic primitives from cmpxchg */
|
|
|
|
#define ATOMIC_OP(op, c_op) \
|
|
static inline void atomic_##op(int i, atomic_t *v) \
|
|
{ \
|
|
int c, old; \
|
|
\
|
|
c = v->counter; \
|
|
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
|
|
c = old; \
|
|
}
|
|
|
|
#define ATOMIC_OP_RETURN(op, c_op) \
|
|
static inline int atomic_##op##_return(int i, atomic_t *v) \
|
|
{ \
|
|
int c, old; \
|
|
\
|
|
c = v->counter; \
|
|
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
|
|
c = old; \
|
|
\
|
|
return c c_op i; \
|
|
}
|
|
|
|
#define ATOMIC_FETCH_OP(op, c_op) \
|
|
static inline int atomic_fetch_##op(int i, atomic_t *v) \
|
|
{ \
|
|
int c, old; \
|
|
\
|
|
c = v->counter; \
|
|
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
|
|
c = old; \
|
|
\
|
|
return c; \
|
|
}
|
|
|
|
#else
|
|
|
|
#include <linux/irqflags.h>
|
|
|
|
#define ATOMIC_OP(op, c_op) \
|
|
static inline void atomic_##op(int i, atomic_t *v) \
|
|
{ \
|
|
unsigned long flags; \
|
|
\
|
|
raw_local_irq_save(flags); \
|
|
v->counter = v->counter c_op i; \
|
|
raw_local_irq_restore(flags); \
|
|
}
|
|
|
|
#define ATOMIC_OP_RETURN(op, c_op) \
|
|
static inline int atomic_##op##_return(int i, atomic_t *v) \
|
|
{ \
|
|
unsigned long flags; \
|
|
int ret; \
|
|
\
|
|
raw_local_irq_save(flags); \
|
|
ret = (v->counter = v->counter c_op i); \
|
|
raw_local_irq_restore(flags); \
|
|
\
|
|
return ret; \
|
|
}
|
|
|
|
#define ATOMIC_FETCH_OP(op, c_op) \
|
|
static inline int atomic_fetch_##op(int i, atomic_t *v) \
|
|
{ \
|
|
unsigned long flags; \
|
|
int ret; \
|
|
\
|
|
raw_local_irq_save(flags); \
|
|
ret = v->counter; \
|
|
v->counter = v->counter c_op i; \
|
|
raw_local_irq_restore(flags); \
|
|
\
|
|
return ret; \
|
|
}
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
#ifndef atomic_add_return
|
|
ATOMIC_OP_RETURN(add, +)
|
|
#endif
|
|
|
|
#ifndef atomic_sub_return
|
|
ATOMIC_OP_RETURN(sub, -)
|
|
#endif
|
|
|
|
#ifndef atomic_fetch_add
|
|
ATOMIC_FETCH_OP(add, +)
|
|
#endif
|
|
|
|
#ifndef atomic_fetch_sub
|
|
ATOMIC_FETCH_OP(sub, -)
|
|
#endif
|
|
|
|
#ifndef atomic_fetch_and
|
|
ATOMIC_FETCH_OP(and, &)
|
|
#endif
|
|
|
|
#ifndef atomic_fetch_or
|
|
ATOMIC_FETCH_OP(or, |)
|
|
#endif
|
|
|
|
#ifndef atomic_fetch_xor
|
|
ATOMIC_FETCH_OP(xor, ^)
|
|
#endif
|
|
|
|
#ifndef atomic_and
|
|
ATOMIC_OP(and, &)
|
|
#endif
|
|
|
|
#ifndef atomic_or
|
|
ATOMIC_OP(or, |)
|
|
#endif
|
|
|
|
#ifndef atomic_xor
|
|
ATOMIC_OP(xor, ^)
|
|
#endif
|
|
|
|
#undef ATOMIC_FETCH_OP
|
|
#undef ATOMIC_OP_RETURN
|
|
#undef ATOMIC_OP
|
|
|
|
/*
|
|
* Atomic operations that C can't guarantee us. Useful for
|
|
* resource counting etc..
|
|
*/
|
|
|
|
#define ATOMIC_INIT(i) { (i) }
|
|
|
|
/**
|
|
* atomic_read - read atomic variable
|
|
* @v: pointer of type atomic_t
|
|
*
|
|
* Atomically reads the value of @v.
|
|
*/
|
|
#ifndef atomic_read
|
|
#define atomic_read(v) READ_ONCE((v)->counter)
|
|
#endif
|
|
|
|
/**
|
|
* atomic_set - set atomic variable
|
|
* @v: pointer of type atomic_t
|
|
* @i: required value
|
|
*
|
|
* Atomically sets the value of @v to @i.
|
|
*/
|
|
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
|
|
|
|
#include <linux/irqflags.h>
|
|
|
|
static inline void atomic_add(int i, atomic_t *v)
|
|
{
|
|
atomic_add_return(i, v);
|
|
}
|
|
|
|
static inline void atomic_sub(int i, atomic_t *v)
|
|
{
|
|
atomic_sub_return(i, v);
|
|
}
|
|
|
|
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
|
|
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
|
|
|
|
#endif /* __ASM_GENERIC_ATOMIC_H */
|