virtio: barrier rework+fixes
This adds a new kind of barrier, and reworks virtio and xen to use it.
Plus some fixes here and there.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJWlU2kAAoJECgfDbjSjVRpZ6IH/Ra19ecG8sCQo9zskr4zo22Z
DZXC3u0sJDBYjjBAiw3IY1FKh7wx2Fr1RhUOj1bteBgcFCMCV1zInP5ITiCyzd1H
YYh1w9C2tZaj2T4t9L4hIrAdtIF8fGS+oI2IojXPjOuDLEt6pfFBEjHp/sfl3UJq
ZmZvw4OXviSNej7jBw8Xni3Uv18yfmLGXvMdkvMSPC1/XL29voGDqTVwhqJwxLVz
k/ZLcKFOzIs9N7Nja0Jl1EiZtC2Y9cpItqweicNAzszlpkSL44vQxmCSefB+WyQ4
gt0O3+AxYkLfrxzCBhUA4IpRex3/XPW1b+1e/V1XjfR2n/FlyLe+AIa8uPJElFc=
=ukaV
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio barrier rework+fixes from Michael Tsirkin:
 "This adds a new kind of barrier, and reworks virtio and xen to use
  it. Plus some fixes here and there"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (44 commits)
  checkpatch: add virt barriers
  checkpatch: check for __smp outside barrier.h
  checkpatch.pl: add missing memory barriers
  virtio: make find_vqs() checkpatch.pl-friendly
  virtio_balloon: fix race between migration and ballooning
  virtio_balloon: fix race by fill and leak
  s390: more efficient smp barriers
  s390: use generic memory barriers
  xen/events: use virt_xxx barriers
  xen/io: use virt_xxx barriers
  xenbus: use virt_xxx barriers
  virtio_ring: use virt_store_mb
  sh: move xchg_cmpxchg to a header by itself
  sh: support 1 and 2 byte xchg
  virtio_ring: update weak barriers to use virt_xxx
  Revert "virtio_ring: Update weak barriers to use dma_wmb/rmb"
  asm-generic: implement virt_xxx memory barriers
  x86: define __smp_xxx
  xtensa: define __smp_xxx
  tile: define __smp_xxx
  ...
commit a200dcb346
Documentation/memory-barriers.txt

@@ -1655,17 +1655,18 @@ macro is a good place to start looking.
 SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
 systems because it is assumed that a CPU will appear to be self-consistent,
 and will order overlapping accesses correctly with respect to itself.
+However, see the subsection on "Virtual Machine Guests" below.
 
 [!] Note that SMP memory barriers _must_ be used to control the ordering of
 references to shared memory on SMP systems, though the use of locking instead
 is sufficient.
 
 Mandatory barriers should not be used to control SMP effects, since mandatory
-barriers unnecessarily impose overhead on UP systems. They may, however, be
-used to control MMIO effects on accesses through relaxed memory I/O windows.
-These are required even on non-SMP systems as they affect the order in which
-memory operations appear to a device by prohibiting both the compiler and the
-CPU from reordering them.
+barriers impose unnecessary overhead on both SMP and UP systems. They may,
+however, be used to control MMIO effects on accesses through relaxed memory I/O
+windows.  These barriers are required even on non-SMP systems as they affect
+the order in which memory operations appear to a device by prohibiting both the
+compiler and the CPU from reordering them.
 
 
 There are some more advanced barrier functions:

@@ -2948,6 +2949,23 @@ The Alpha defines the Linux kernel's memory barrier model.
 
 See the subsection on "Cache Coherency" above.
 
+VIRTUAL MACHINE GUESTS
+----------------------
+
+Guests running within virtual machines might be affected by SMP effects even if
+the guest itself is compiled without SMP support.  This is an artifact of
+interfacing with an SMP host while running an UP kernel.  Using mandatory
+barriers for this use-case would be possible but is often suboptimal.
+
+To handle this case optimally, low-level virt_mb() etc macros are available.
+These have the same effect as smp_mb() etc when SMP is enabled, but generate
+identical code for SMP and non-SMP systems.  For example, virtual machine
+guests should use virt_mb() rather than smp_mb() when synchronizing against a
+(possibly SMP) host.
+
+These are equivalent to smp_mb() etc counterparts in all other respects,
+in particular, they do not control MMIO effects: to control
+MMIO effects, use mandatory barriers.
+
 ============
 EXAMPLE USES
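To make the new "Virtual Machine Guests" guidance concrete, here is a minimal
sketch of a guest publishing work to a (possibly SMP) host through shared
memory. This is illustration only, not part of the patch: the ring layout and
all demo_* names are invented, while virt_wmb() and WRITE_ONCE() are the real
primitives this series introduces and uses.

	/* Hypothetical guest->host ring; layout invented for this example. */
	struct demo_ring {
		unsigned int prod;		/* written by guest */
		unsigned int cons;		/* written by host */
		struct demo_desc slot[DEMO_RING_SIZE];
	};

	static void demo_publish(struct demo_ring *r, struct demo_desc d)
	{
		unsigned int idx = READ_ONCE(r->prod);

		r->slot[idx % DEMO_RING_SIZE] = d;
		/*
		 * Make the slot contents visible to the host before the
		 * index update.  virt_wmb() emits a real barrier even on a
		 * !CONFIG_SMP guest; smp_wmb() would collapse to barrier().
		 */
		virt_wmb();
		WRITE_ONCE(r->prod, idx + 1);
	}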
arch/arm/include/asm/barrier.h

@@ -60,38 +60,11 @@ extern void arm_heavy_mb(void);
 #define dma_wmb()	barrier()
 #endif
 
-#ifndef CONFIG_SMP
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#else
-#define smp_mb()	dmb(ish)
-#define smp_rmb()	smp_mb()
-#define smp_wmb()	dmb(ishst)
-#endif
-
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
-
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	___p1;								\
-})
-
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	smp_mb()
+#define __smp_mb()	dmb(ish)
+#define __smp_rmb()	__smp_mb()
+#define __smp_wmb()	dmb(ishst)
 
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_BARRIER_H */
arch/arm64/include/asm/barrier.h

@@ -35,11 +35,11 @@
 #define dma_rmb()	dmb(oshld)
 #define dma_wmb()	dmb(oshst)
 
-#define smp_mb()	dmb(ish)
-#define smp_rmb()	dmb(ishld)
-#define smp_wmb()	dmb(ishst)
+#define __smp_mb()	dmb(ish)
+#define __smp_rmb()	dmb(ishld)
+#define __smp_wmb()	dmb(ishst)
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	switch (sizeof(*p)) {						\
@@ -62,7 +62,7 @@ do {									\
 	}								\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	union { typeof(*p) __val; char __c[1]; } __u;			\
 	compiletime_assert_atomic_type(*p);				\
@@ -91,14 +91,7 @@ do {									\
 	__u.__val;							\
 })
 
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define nop()		asm volatile("nop");
 
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	smp_mb()
-
 #include <asm-generic/barrier.h>
 
 #endif	/* __ASSEMBLY__ */
arch/blackfin/include/asm/barrier.h

@@ -78,8 +78,8 @@
 
 #endif /* !CONFIG_SMP */
 
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 #include <asm-generic/barrier.h>
 
arch/ia64/include/asm/barrier.h

@@ -42,34 +42,24 @@
 #define dma_rmb()	mb()
 #define dma_wmb()	mb()
 
-#ifdef CONFIG_SMP
-# define smp_mb()	mb()
-#else
-# define smp_mb()	barrier()
-#endif
+# define __smp_mb()	mb()
 
-#define smp_rmb()	smp_mb()
-#define smp_wmb()	smp_mb()
-
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
  */
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
@@ -77,12 +67,12 @@ do {									\
 	___p1;								\
 })
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
  * that none of the previous instructions in the same group are
  * affected by the rsm/ssm.
  */
 
 #include <asm-generic/barrier.h>
 
 #endif /* _ASM_IA64_BARRIER_H */
arch/ia64/kernel/iosapic.c

@@ -256,7 +256,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
 }
 
 static void
-nop (struct irq_data *data)
+iosapic_nop (struct irq_data *data)
 {
 	/* do nothing... */
 }
@@ -415,7 +415,7 @@ iosapic_unmask_level_irq (struct irq_data *data)
 #define iosapic_shutdown_level_irq	mask_irq
 #define iosapic_enable_level_irq	unmask_irq
 #define iosapic_disable_level_irq	mask_irq
-#define iosapic_ack_level_irq		nop
+#define iosapic_ack_level_irq		iosapic_nop
 
 static struct irq_chip irq_type_iosapic_level = {
 	.name =		"IO-SAPIC-level",
@@ -453,7 +453,7 @@ iosapic_ack_edge_irq (struct irq_data *data)
 }
 
 #define iosapic_enable_edge_irq		unmask_irq
-#define iosapic_disable_edge_irq	nop
+#define iosapic_disable_edge_irq	iosapic_nop
 
 static struct irq_chip irq_type_iosapic_edge = {
 	.name =		"IO-SAPIC-edge",
arch/metag/include/asm/barrier.h

@@ -44,16 +44,6 @@ static inline void wr_fence(void)
 #define rmb()		barrier()
 #define wmb()		mb()
 
-#define dma_rmb()	rmb()
-#define dma_wmb()	wmb()
-
-#ifndef CONFIG_SMP
-#define fence()		do { } while (0)
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#else
-
 #ifdef CONFIG_METAG_SMP_WRITE_REORDERING
 /*
  * Write to the atomic memory unlock system event register (command 0). This is
@@ -63,45 +53,32 @@ static inline void wr_fence(void)
  * incoherence). It is therefore ineffective if used after and on the same
  * thread as a write.
  */
-static inline void fence(void)
+static inline void metag_fence(void)
 {
	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
	barrier();
	*flushptr = 0;
	barrier();
 }
-#define smp_mb()	fence()
-#define smp_rmb()	fence()
-#define smp_wmb()	barrier()
+#define __smp_mb()	metag_fence()
+#define __smp_rmb()	metag_fence()
+#define __smp_wmb()	barrier()
 #else
-#define fence()		do { } while (0)
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
+#define metag_fence()	do { } while (0)
+#define __smp_mb()	barrier()
+#define __smp_rmb()	barrier()
+#define __smp_wmb()	barrier()
 #endif
-#endif
 
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
+#ifdef CONFIG_SMP
+#define fence()		metag_fence()
+#else
+#define fence()		do { } while (0)
+#endif
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
-
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	___p1;								\
-})
-
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 #include <asm-generic/barrier.h>
 
 #endif /* _ASM_METAG_BARRIER_H */
arch/mips/include/asm/barrier.h

@@ -10,9 +10,6 @@
 
 #include <asm/addrspace.h>
 
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
 #ifdef CONFIG_CPU_HAS_SYNC
 #define __sync()				\
	__asm__ __volatile__(			\
@@ -87,23 +84,21 @@
 
 #define wmb()		fast_wmb()
 #define rmb()		fast_rmb()
 #define dma_wmb()	fast_wmb()
 #define dma_rmb()	fast_rmb()
 
-#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#if defined(CONFIG_WEAK_ORDERING)
 # ifdef CONFIG_CPU_CAVIUM_OCTEON
-#  define smp_mb()	__sync()
-#  define smp_rmb()	barrier()
-#  define smp_wmb()	__syncw()
+#  define __smp_mb()	__sync()
+#  define __smp_rmb()	barrier()
+#  define __smp_wmb()	__syncw()
 # else
-#  define smp_mb()	__asm__ __volatile__("sync" : : :"memory")
-#  define smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
-#  define smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
+#  define __smp_mb()	__asm__ __volatile__("sync" : : :"memory")
+#  define __smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
+#  define __smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
 # endif
 #else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
+#define __smp_mb()	barrier()
+#define __smp_rmb()	barrier()
+#define __smp_wmb()	barrier()
 #endif
 
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
@@ -112,13 +107,11 @@
 #define __WEAK_LLSC_MB		"		\n"
 #endif
 
-#define smp_store_mb(var, value) \
-	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #define smp_mb__before_llsc() smp_wmb()
+#define __smp_mb__before_llsc() __smp_wmb()
 /* Cause previous writes to become visible on all CPUs as soon as possible */
 #define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
@@ -126,25 +119,13 @@
					    ".set pop" : : : "memory")
 #else
 #define smp_mb__before_llsc() smp_llsc_mb()
+#define __smp_mb__before_llsc() smp_llsc_mb()
 #define nudge_writes() mb()
 #endif
 
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
-
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	___p1;								\
-})
+#define __smp_mb__before_atomic()	__smp_mb__before_llsc()
+#define __smp_mb__after_atomic()	smp_llsc_mb()
 
-#define smp_mb__before_atomic()	smp_mb__before_llsc()
-#define smp_mb__after_atomic()	smp_llsc_mb()
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
arch/powerpc/include/asm/barrier.h

@@ -34,8 +34,6 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC
 #else
@@ -46,22 +44,11 @@
 #define dma_rmb()	__lwsync()
 #define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 
-#ifdef CONFIG_SMP
-#define smp_lwsync()	__lwsync()
+#define __smp_lwsync()	__lwsync()
 
-#define smp_mb()	mb()
-#define smp_rmb()	__lwsync()
-#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#else
-#define smp_lwsync()	barrier()
-
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#endif /* CONFIG_SMP */
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
+#define __smp_mb()	mb()
+#define __smp_rmb()	__lwsync()
+#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 
 /*
  * This is a barrier which prevents following instructions from being
@@ -72,23 +59,23 @@
 #define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
	compiletime_assert_atomic_type(*p);				\
-	smp_lwsync();							\
+	__smp_lwsync();							\
	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
-	smp_lwsync();							\
+	__smp_lwsync();							\
	___p1;								\
 })
 
 #define smp_mb__before_atomic()     smp_mb()
 #define smp_mb__after_atomic()      smp_mb()
 #define smp_mb__before_spinlock()   smp_mb()
 
 #include <asm-generic/barrier.h>
 
 #endif /* _ASM_POWERPC_BARRIER_H */
arch/s390/include/asm/barrier.h

@@ -26,26 +26,18 @@
 #define wmb()				barrier()
 #define dma_rmb()			mb()
 #define dma_wmb()			mb()
-#define smp_mb()			mb()
-#define smp_rmb()			rmb()
-#define smp_wmb()			wmb()
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_mb__before_atomic()		smp_mb()
-#define smp_mb__after_atomic()		smp_mb()
+#define __smp_mb()			mb()
+#define __smp_rmb()			rmb()
+#define __smp_wmb()			wmb()
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
@@ -53,4 +45,9 @@ do {									\
	___p1;								\
 })
 
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
arch/sh/include/asm/barrier.h

@@ -32,7 +32,8 @@
 #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #endif
 
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) __smp_store_mb(var, value)
 
 #include <asm-generic/barrier.h>
 
arch/sh/include/asm/cmpxchg-grb.h

@@ -23,6 +23,28 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
	return retval;
 }
 
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ (
+		"   .align  2             \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN */
+		"   mov.w  @%1,   %0      \n\t" /* load  old value */
+		"   extu.w  %0,   %0      \n\t" /* extend as unsigned */
+		"   mov.w   %2,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (retval),
+		  "+r"  (m),
+		  "+r"  (val)		/* inhibit r15 overloading */
+		:
+		: "memory" , "r0", "r1");
+
+	return retval;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
	unsigned long retval;
arch/sh/include/asm/cmpxchg-irq.h

@@ -14,6 +14,17 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
	return retval;
 }
 
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val;
+	local_irq_restore(flags);
+	return retval;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
	unsigned long flags, retval;
arch/sh/include/asm/cmpxchg-llsc.h

@@ -22,29 +22,8 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
	return retval;
 }
 
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-	unsigned long retval;
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-		"1:					\n\t"
-		"movli.l	@%2, %0	! xchg_u8	\n\t"
-		"mov		%0, %1			\n\t"
-		"mov		%3, %0			\n\t"
-		"movco.l	%0, @%2			\n\t"
-		"bf		1b			\n\t"
-		"synco					\n\t"
-		: "=&z"(tmp), "=&r" (retval)
-		: "r" (m), "r" (val & 0xff)
-		: "t", "memory"
-	);
-
-	return retval;
-}
-
 static inline unsigned long
-__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
+__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 {
	unsigned long retval;
	unsigned long tmp;
@@ -68,4 +47,6 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
	return retval;
 }
 
+#include <asm/cmpxchg-xchg.h>
+
 #endif /* __ASM_SH_CMPXCHG_LLSC_H */
arch/sh/include/asm/cmpxchg-xchg.h (new file, 51 lines)
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_CMPXCHG_XCHG_H
+#define __ASM_SH_CMPXCHG_XCHG_H
+
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See the
+ * file "COPYING" in the main directory of this archive for more details.
+ */
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * Portable implementations of 1 and 2 byte xchg using a 4 byte cmpxchg.
+ * Note: this header isn't self-contained: before including it, __cmpxchg_u32
+ * must be defined first.
+ */
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+#endif /* __ASM_SH_CMPXCHG_XCHG_H */
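As a sanity check of the mask arithmetic in __xchg_cmpxchg() above, consider a
hypothetical little-endian xchg_u16() on a u16 that sits at byte offset 2 of
its aligned 32-bit word (all concrete values below are invented for the
walk-through):

	/* off = 2, size = 2 (little-endian)                                 */
	/* bitoff  = 2 * BITS_PER_BYTE = 16                                  */
	/* bitmask = ((1 << 16) - 1) << 16 = 0xffff0000                      */
	/* with *p == 0xabcd1234 and x == 0xbeef:                            */
	/*   ret  = (0xabcd1234 & 0xffff0000) >> 16 = 0xabcd   (old u16)     */
	/*   newv = (0xabcd1234 & ~0xffff0000) | (0xbeef << 16) = 0xbeef1234 */
	/* __cmpxchg_u32() retries the splice until no other writer races.   */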
arch/sh/include/asm/cmpxchg.h

@@ -27,6 +27,9 @@ extern void __xchg_called_with_bad_pointer(void);
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
+	case 2:						\
+		__xchg__res = xchg_u16(__xchg_ptr, x);	\
+		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
arch/sparc/include/asm/barrier_32.h

@@ -1,7 +1,6 @@
 #ifndef __SPARC_BARRIER_H
 #define __SPARC_BARRIER_H
 
-#include <asm/processor.h> /* for nop() */
 #include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC_BARRIER_H) */
arch/sparc/include/asm/barrier_64.h

@@ -37,33 +37,14 @@ do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
 #define rmb()	__asm__ __volatile__("":::"memory")
 #define wmb()	__asm__ __volatile__("":::"memory")
 
 #define dma_rmb()	rmb()
 #define dma_wmb()	wmb()
 
-#define smp_store_mb(__var, __value) \
+#define __smp_store_mb(__var, __value) \
	do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#else
-#define smp_mb()	__asm__ __volatile__("":::"memory")
-#define smp_rmb()	__asm__ __volatile__("":::"memory")
-#define smp_wmb()	__asm__ __volatile__("":::"memory")
-#endif
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
@@ -71,7 +52,9 @@ do {									\
	___p1;								\
 })
 
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 #include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC64_BARRIER_H) */
arch/sparc/include/asm/processor.h

@@ -5,7 +5,4 @@
 #else
 #include <asm/processor_32.h>
 #endif
-
-#define nop() 		__asm__ __volatile__ ("nop")
-
 #endif
arch/tile/include/asm/barrier.h

@@ -79,11 +79,12 @@ mb_incoherent(void)
 * But after the word is updated, the routine issues an "mf" before returning,
 * and since it's a function call, we don't even need a compiler barrier.
 */
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	do { } while (0)
+#define __smp_mb__before_atomic()	__smp_mb()
+#define __smp_mb__after_atomic()	do { } while (0)
+#define smp_mb__after_atomic()	__smp_mb__after_atomic()
 #else /* 64 bit */
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	smp_mb()
+#define __smp_mb__before_atomic()	__smp_mb()
+#define __smp_mb__after_atomic()	__smp_mb()
 #endif
 
 #include <asm-generic/barrier.h>
arch/x86/include/asm/barrier.h

@@ -31,20 +31,10 @@
 #endif
 #define dma_wmb()	barrier()
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	dma_rmb()
-#define smp_wmb()	barrier()
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else /* !SMP */
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-#endif /* SMP */
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
+#define __smp_mb()	mb()
+#define __smp_rmb()	dma_rmb()
+#define __smp_wmb()	barrier()
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #if defined(CONFIG_X86_PPRO_FENCE)
 
@@ -53,31 +43,31 @@
 * model and we should fall back to full barriers.
 */
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	__smp_mb();							\
	___p1;								\
 })
 
 #else /* regular x86 TSO memory ordering */
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
@@ -88,7 +78,9 @@ do {									\
 #endif
 
 /* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
+
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_X86_BARRIER_H */
arch/x86/um/asm/barrier.h

@@ -36,13 +36,6 @@
 #endif /* CONFIG_X86_PPRO_FENCE */
 #define dma_wmb()	barrier()
 
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
+#include <asm-generic/barrier.h>
 
 #endif
arch/xtensa/include/asm/barrier.h

@@ -13,8 +13,8 @@
 #define rmb() barrier()
 #define wmb() mb()
 
-#define smp_mb__before_atomic()		barrier()
-#define smp_mb__after_atomic()		barrier()
+#define __smp_mb__before_atomic()		barrier()
+#define __smp_mb__after_atomic()		barrier()
 
 #include <asm-generic/barrier.h>
 
drivers/gpu/drm/virtio/virtgpu_kms.c

@@ -130,7 +130,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
-	static const char *names[] = { "control", "cursor" };
+	static const char * const names[] = { "control", "cursor" };
 
	struct virtio_gpu_device *vgdev;
	/* this will expand later */
drivers/misc/mic/card/mic_virtio.c

@@ -311,7 +311,7 @@ unmap:
 static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[],
			vq_callback_t *callbacks[],
-			const char *names[])
+			const char * const names[])
 {
	struct mic_vdev *mvdev = to_micvdev(vdev);
	struct mic_device_ctrl __iomem *dc = mvdev->dc;
drivers/remoteproc/remoteproc_virtio.c

@@ -147,7 +147,7 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
 static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
-				 const char *names[])
+				 const char * const names[])
 {
	struct rproc *rproc = vdev_to_rproc(vdev);
	int i, ret;
drivers/rpmsg/virtio_rpmsg_bus.c

@@ -945,7 +945,7 @@ static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
 static int rpmsg_probe(struct virtio_device *vdev)
 {
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
-	const char *names[] = { "input", "output" };
+	static const char * const names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
drivers/s390/virtio/kvm_virtio.c

@@ -255,7 +255,7 @@ static void kvm_del_vqs(struct virtio_device *vdev)
 static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[],
			vq_callback_t *callbacks[],
-			const char *names[])
+			const char * const names[])
 {
	struct kvm_device *kdev = to_kvmdev(vdev);
	int i;
drivers/s390/virtio/virtio_ccw.c

@@ -635,7 +635,7 @@ out:
 static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
-			       const char *names[])
+			       const char * const names[])
 {
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
drivers/virtio/virtio_balloon.c

@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
	 */
	if (vb->num_pfns != 0)
		tell_host(vb, vb->deflate_vq);
-	mutex_unlock(&vb->balloon_lock);
	release_pages_balloon(vb);
+	mutex_unlock(&vb->balloon_lock);
	return num_freed_pages;
 }
 
@@ -388,7 +388,7 @@ static int init_vqs(struct virtio_balloon *vb)
 {
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
-	const char *names[] = { "inflate", "deflate", "stats" };
+	static const char * const names[] = { "inflate", "deflate", "stats" };
	int err, nvqs;
 
	/*
drivers/virtio/virtio_input.c

@@ -170,7 +170,7 @@ static int virtinput_init_vqs(struct virtio_input *vi)
	struct virtqueue *vqs[2];
	vq_callback_t *cbs[] = { virtinput_recv_events,
				 virtinput_recv_status };
-	static const char *names[] = { "events", "status" };
+	static const char * const names[] = { "events", "status" };
	int err;
 
	err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names);
drivers/virtio/virtio_mmio.c

@@ -482,7 +482,7 @@ error_available:
 static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
-		       const char *names[])
+		       const char * const names[])
 {
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
drivers/virtio/virtio_pci_common.c

@@ -296,7 +296,7 @@ void vp_del_vqs(struct virtio_device *vdev)
 static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
-			      const char *names[],
+			      const char * const names[],
			      bool use_msix,
			      bool per_vq_vectors)
 {
@@ -376,7 +376,7 @@ error_find:
 int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[],
		vq_callback_t *callbacks[],
-		const char *names[])
+		const char * const names[])
 {
	int err;
 
drivers/virtio/virtio_pci_common.h

@@ -139,7 +139,7 @@ void vp_del_vqs(struct virtio_device *vdev);
 int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[],
		vq_callback_t *callbacks[],
-		const char *names[]);
+		const char * const names[]);
 const char *vp_bus_name(struct virtio_device *vdev);
 
 /* Setup the affinity for a virtqueue:
drivers/virtio/virtio_pci_modern.c

@@ -418,7 +418,7 @@ err_new_queue:
 static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
-			      const char *names[])
+			      const char * const names[])
 {
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
drivers/virtio/virtio_ring.c

@@ -517,10 +517,10 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
-	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
-		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
-		virtio_mb(vq->weak_barriers);
-	}
+	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
+		virtio_store_mb(vq->weak_barriers,
+				&vring_used_event(&vq->vring),
+				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
 
 #ifdef DEBUG
	vq->last_add_time_valid = false;
@@ -653,8 +653,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
-	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
-	virtio_mb(vq->weak_barriers);
+
+	virtio_store_mb(vq->weak_barriers,
+			&vring_used_event(&vq->vring),
+			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
+
	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
drivers/xen/events/events_fifo.c

@@ -41,6 +41,7 @@
 #include <linux/percpu.h>
 #include <linux/cpu.h>
 
+#include <asm/barrier.h>
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -296,7 +297,7 @@ static void consume_one_event(unsigned cpu,
	 * control block.
	 */
	if (head == 0) {
-		rmb(); /* Ensure word is up-to-date before reading head. */
+		virt_rmb(); /* Ensure word is up-to-date before reading head. */
		head = control_block->head[priority];
	}
 
drivers/xen/xenbus/xenbus_comms.c

@@ -123,14 +123,14 @@ int xb_write(const void *data, unsigned len)
			avail = len;
 
		/* Must write data /after/ reading the consumer index. */
-		mb();
+		virt_mb();
 
		memcpy(dst, data, avail);
		data += avail;
		len -= avail;
 
		/* Other side must not see new producer until data is there. */
-		wmb();
+		virt_wmb();
		intf->req_prod += avail;
 
		/* Implies mb(): other side will see the updated producer. */
@@ -180,14 +180,14 @@ int xb_read(void *data, unsigned len)
			avail = len;
 
		/* Must read data /after/ reading the producer index. */
-		rmb();
+		virt_rmb();
 
		memcpy(data, src, avail);
		data += avail;
		len -= avail;
 
		/* Other side must not see free space until we've copied out */
-		mb();
+		virt_mb();
		intf->rsp_cons += avail;
 
		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
include/asm-generic/barrier.h

@@ -54,22 +54,38 @@
 #define read_barrier_depends()		do { } while (0)
 #endif
 
+#ifndef __smp_mb
+#define __smp_mb()	mb()
+#endif
+
+#ifndef __smp_rmb
+#define __smp_rmb()	rmb()
+#endif
+
+#ifndef __smp_wmb
+#define __smp_wmb()	wmb()
+#endif
+
+#ifndef __smp_read_barrier_depends
+#define __smp_read_barrier_depends()	read_barrier_depends()
+#endif
+
 #ifdef CONFIG_SMP
 
 #ifndef smp_mb
-#define smp_mb()	mb()
+#define smp_mb()	__smp_mb()
 #endif
 
 #ifndef smp_rmb
-#define smp_rmb()	rmb()
+#define smp_rmb()	__smp_rmb()
 #endif
 
 #ifndef smp_wmb
-#define smp_wmb()	wmb()
+#define smp_wmb()	__smp_wmb()
 #endif
 
 #ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends()	read_barrier_depends()
+#define smp_read_barrier_depends()	__smp_read_barrier_depends()
 #endif
 
 #else	/* !CONFIG_SMP */
@@ -92,32 +108,104 @@
 
 #endif	/* CONFIG_SMP */
 
+#ifndef __smp_store_mb
+#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
+#endif
+
+#ifndef __smp_mb__before_atomic
+#define __smp_mb__before_atomic()	__smp_mb()
+#endif
+
+#ifndef __smp_mb__after_atomic
+#define __smp_mb__after_atomic()	__smp_mb()
+#endif
+
+#ifndef __smp_store_release
+#define __smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_mb();							\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+#endif
+
+#ifndef __smp_load_acquire
+#define __smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_mb();							\
+	___p1;								\
+})
+#endif
+
+#ifdef CONFIG_SMP
+
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value)  __smp_store_mb(var, value)
 #endif
 
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic()	smp_mb()
+#define smp_mb__before_atomic()	__smp_mb__before_atomic()
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic()	smp_mb()
+#define smp_mb__after_atomic()	__smp_mb__after_atomic()
 #endif
 
 #ifndef smp_store_release
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
+#define smp_store_release(p, v) __smp_store_release(p, v)
 #endif
 
 #ifndef smp_load_acquire
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	___p1;								\
-})
+#define smp_load_acquire(p) __smp_load_acquire(p)
 #endif
 
+#else	/* !CONFIG_SMP */
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()	barrier()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic()	barrier()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
+#endif
+
+#endif	/* CONFIG_SMP */
+
+/* Barriers for virtual machine guests when talking to an SMP host */
+#define virt_mb() __smp_mb()
+#define virt_rmb() __smp_rmb()
+#define virt_wmb() __smp_wmb()
+#define virt_read_barrier_depends() __smp_read_barrier_depends()
+#define virt_store_mb(var, value) __smp_store_mb(var, value)
+#define virt_mb__before_atomic() __smp_mb__before_atomic()
+#define virt_mb__after_atomic() __smp_mb__after_atomic()
+#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_load_acquire(p) __smp_load_acquire(p)
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
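The point of the restructuring above is that an architecture now provides only
the __smp_xxx primitives and the generic header derives both the smp_xxx and
virt_xxx families from them. A hypothetical minimal port could look like this
(sketch only; "fence" stands in for whatever instruction the machine provides,
and the arch/foo path is invented):

	/* hypothetical arch/foo/include/asm/barrier.h */
	#ifndef __FOO_BARRIER_H
	#define __FOO_BARRIER_H

	#define mb()		__asm__ __volatile__("fence" : : : "memory")

	/* One strong primitive; asm-generic/barrier.h derives the rest:
	 * smp_mb()  -> __smp_mb() under CONFIG_SMP, barrier() otherwise;
	 * virt_mb() -> __smp_mb() always, for guests facing an SMP host. */
	#define __smp_mb()	mb()

	#include <asm-generic/barrier.h>

	#endif /* __FOO_BARRIER_H */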
include/linux/virtio_config.h

@@ -70,7 +70,7 @@ struct virtio_config_ops {
	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
			struct virtqueue *vqs[],
			vq_callback_t *callbacks[],
-			const char *names[]);
+			const char * const names[]);
	void (*del_vqs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
include/linux/virtio_ring.h

@@ -12,7 +12,7 @@
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
- * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (eg. other heterogeneous
@@ -23,18 +23,16 @@
 
 static inline void virtio_mb(bool weak_barriers)
 {
-#ifdef CONFIG_SMP
	if (weak_barriers)
-		smp_mb();
+		virt_mb();
	else
-#endif
		mb();
 }
 
 static inline void virtio_rmb(bool weak_barriers)
 {
	if (weak_barriers)
-		dma_rmb();
+		virt_rmb();
	else
		rmb();
 }
@@ -42,11 +40,22 @@ static inline void virtio_rmb(bool weak_barriers)
 static inline void virtio_wmb(bool weak_barriers)
 {
	if (weak_barriers)
-		dma_wmb();
+		virt_wmb();
	else
		wmb();
 }
 
+static inline void virtio_store_mb(bool weak_barriers,
+				   __virtio16 *p, __virtio16 v)
+{
+	if (weak_barriers) {
+		virt_store_mb(*p, v);
+	} else {
+		WRITE_ONCE(*p, v);
+		mb();
+	}
+}
+
 struct virtio_device;
 struct virtqueue;
 
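virtio_store_mb() packages the store-then-full-barrier idiom into a single
primitive, so architectures whose __smp_store_mb() is a single xchg-style
instruction (sparc64, x86) benefit, and UP guests still get a real barrier.
The virtio_ring.c hunks earlier in this merge use it exactly like this
(abbreviated; vdev and idx stand for the full expressions there):

	/* Before: open-coded store followed by a full barrier. */
	vring_used_event(&vq->vring) = cpu_to_virtio16(vdev, idx);
	virtio_mb(vq->weak_barriers);

	/* After: one call; maps to virt_store_mb() for weak barriers. */
	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(vdev, idx));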
include/xen/interface/io/ring.h

@@ -208,12 +208,12 @@ struct __name##_back_ring {						\
 
 
 #define RING_PUSH_REQUESTS(_r) do {					\
-	wmb(); /* back sees requests /before/ updated producer index */	\
+	virt_wmb(); /* back sees requests /before/ updated producer index */	\
	(_r)->sring->req_prod = (_r)->req_prod_pvt;			\
 } while (0)
 
 #define RING_PUSH_RESPONSES(_r) do {					\
-	wmb(); /* front sees responses /before/ updated producer index */	\
+	virt_wmb(); /* front sees responses /before/ updated producer index */	\
	(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;			\
 } while (0)
 
@@ -250,9 +250,9 @@ struct __name##_back_ring {						\
 #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {		\
	RING_IDX __old = (_r)->sring->req_prod;				\
	RING_IDX __new = (_r)->req_prod_pvt;				\
-	wmb(); /* back sees requests /before/ updated producer index */	\
+	virt_wmb(); /* back sees requests /before/ updated producer index */	\
	(_r)->sring->req_prod = __new;					\
-	mb(); /* back sees new requests /before/ we check req_event */	\
+	virt_mb(); /* back sees new requests /before/ we check req_event */	\
	(_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <	\
		     (RING_IDX)(__new - __old));			\
 } while (0)
@@ -260,9 +260,9 @@ struct __name##_back_ring {						\
 #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {		\
	RING_IDX __old = (_r)->sring->rsp_prod;				\
	RING_IDX __new = (_r)->rsp_prod_pvt;				\
-	wmb(); /* front sees responses /before/ updated producer index */	\
+	virt_wmb(); /* front sees responses /before/ updated producer index */	\
	(_r)->sring->rsp_prod = __new;					\
-	mb(); /* front sees new responses /before/ we check rsp_event */	\
+	virt_mb(); /* front sees new responses /before/ we check rsp_event */	\
	(_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <	\
		     (RING_IDX)(__new - __old));			\
 } while (0)
@@ -271,7 +271,7 @@ struct __name##_back_ring {						\
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);		\
	if (_work_to_do) break;						\
	(_r)->sring->req_event = (_r)->req_cons + 1;			\
-	mb();								\
+	virt_mb();							\
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);		\
 } while (0)
 
@@ -279,7 +279,7 @@ struct __name##_back_ring {						\
	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);		\
	if (_work_to_do) break;						\
	(_r)->sring->rsp_event = (_r)->rsp_cons + 1;			\
-	mb();								\
+	virt_mb();							\
	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);		\
 } while (0)
 
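The virt_wmb()/virt_mb() pairs in the *_CHECK_NOTIFY macros implement the
usual event-suppression handshake: publish the new producer index, then issue
a full barrier before reading the peer's event threshold. A backend consuming
requests with these macros might be structured as below (a sketch: the
demo_* ring type and handler are hypothetical, the RING_* macros are real):

	static void demo_consume(struct demo_back_ring *ring)
	{
		int more;

		do {
			RING_IDX cons = ring->req_cons;

			while (RING_HAS_UNCONSUMED_REQUESTS(ring)) {
				demo_handle(RING_GET_REQUEST(ring, cons));
				ring->req_cons = ++cons;
			}
			/* Re-arms req_event and re-checks; the virt_mb()
			 * inside closes the race with a concurrent producer. */
			RING_FINAL_CHECK_FOR_REQUESTS(ring, more);
		} while (more);
	}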
mm/balloon_compaction.c

@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
	bool dequeued_page;
 
	dequeued_page = false;
+	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
				continue;
			}
 #endif
-			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
			balloon_page_delete(page);
			__count_vm_event(BALLOON_DEFLATE);
-			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}
+	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 
	if (!dequeued_page) {
		/*
scripts/checkpatch.pl

@@ -5116,13 +5116,44 @@ sub process {
			}
		}
 # check for memory barriers without a comment.
-		if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
+
+		my $barriers = qr{
+			mb|
+			rmb|
+			wmb|
+			read_barrier_depends
+		}x;
+		my $barrier_stems = qr{
+			mb__before_atomic|
+			mb__after_atomic|
+			store_release|
+			load_acquire|
+			store_mb|
+			(?:$barriers)
+		}x;
+		my $all_barriers = qr{
+			(?:$barriers)|
+			smp_(?:$barrier_stems)|
+			virt_(?:$barrier_stems)
+		}x;
+
+		if ($line =~ /\b(?:$all_barriers)\s*\(/) {
			if (!ctx_has_comment($first_line, $linenr)) {
				WARN("MEMORY_BARRIER",
				     "memory barrier without comment\n" . $herecurr);
			}
		}
+
+		my $underscore_smp_barriers = qr{__smp_(?:$barrier_stems)}x;
+
+		if ($realfile !~ m@^include/asm-generic/@ &&
+		    $realfile !~ m@/barrier\.h$@ &&
+		    $line =~ m/\b(?:$underscore_smp_barriers)\s*\(/ &&
+		    $line !~ m/^.\s*\#\s*define\s+(?:$underscore_smp_barriers)\s*\(/) {
+			WARN("MEMORY_BARRIER",
+			     "__smp memory barriers shouldn't be used outside barrier.h and asm-generic\n" . $herecurr);
+		}
+
 # check for waitqueue_active without a comment.
		if ($line =~ /\bwaitqueue_active\s*\(/) {
			if (!ctx_has_comment($first_line, $linenr)) {
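With the extended $all_barriers pattern above, checkpatch now warns on the
smp_ and virt_ families too, not only the four classic barriers, and flags any
__smp_*() use outside barrier.h and asm-generic. Illustration (hypothetical
driver code; the first call would be flagged, the second passes the check):

	virt_wmb();

	/* Make the descriptor visible before the producer index update. */
	virt_wmb();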