Merge tag 'locking-urgent-2020-08-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Thomas Gleixner:
 "A set of locking fixes and updates:

   - Untangle the header spaghetti which causes build failures in
     various situations caused by the lockdep additions to seqcount to
     validate that the write side critical sections are non-preemptible.

   - The seqcount associated lock debug addons which were blocked by the
     above fallout.

     seqcount writers contrary to seqlock writers must be externally
     serialized, which usually happens via locking - except for strict
     per CPU seqcounts. As the lock is not part of the seqcount, lockdep
     cannot validate that the lock is held.

     This new debug mechanism adds the concept of associated locks.
     sequence count has now lock type variants and corresponding
     initializers which take a pointer to the associated lock used for
     writer serialization. If lockdep is enabled the lock pointer is
     stored and write_seqcount_begin() has a lockdep assertion to
     validate that the lock is held.

     Aside of the type and the initializer no other code changes are
     required at the seqcount usage sites. The rest of the seqcount API
     is unchanged and determines the type at compile time with the help
     of _Generic which is possible now that the minimal GCC version has
     been moved up.

     Adding this lockdep coverage unearthed a handful of seqcount bugs
     which have been addressed already independent of this.

     While generally useful this comes with a Trojan Horse twist: On RT
     kernels the write side critical section can become preemptible if
     the writers are serialized by an associated lock, which leads to
     the well known reader preempts writer livelock. RT prevents this by
     storing the associated lock pointer independent of lockdep in the
     seqcount and changing the reader side to block on the lock when a
     reader detects that a writer is in the write side critical section.

   - Conversion of seqcount usage sites to associated types and
     initializers"

* tag 'locking-urgent-2020-08-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  locking/seqlock, headers: Untangle the spaghetti monster
  locking, arch/ia64: Reduce <asm/smp.h> header dependencies by moving XTP bits into the new <asm/xtp.h> header
  x86/headers: Remove APIC headers from <asm/smp.h>
  seqcount: More consistent seqprop names
  seqcount: Compress SEQCNT_LOCKNAME_ZERO()
  seqlock: Fold seqcount_LOCKNAME_init() definition
  seqlock: Fold seqcount_LOCKNAME_t definition
  seqlock: s/__SEQ_LOCKDEP/__SEQ_LOCK/g
  hrtimer: Use sequence counter with associated raw spinlock
  kvm/eventfd: Use sequence counter with associated spinlock
  userfaultfd: Use sequence counter with associated spinlock
  NFSv4: Use sequence counter with associated spinlock
  iocost: Use sequence counter with associated spinlock
  raid5: Use sequence counter with associated spinlock
  vfs: Use sequence counter with associated spinlock
  timekeeping: Use sequence counter with associated raw spinlock
  xfrm: policy: Use sequence counters with associated lock
  netfilter: nft_set_rbtree: Use sequence counter with associated rwlock
  netfilter: conntrack: Use sequence counter with associated spinlock
  sched: tasks: Use sequence counter with associated spinlock
  ...
commit 97d052ea3f
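As a concrete illustration of the pattern this series enables — a minimal
sketch only, pieced together from the documentation and conversions in the
diff below; foo_lock, foo_seq and foo_data are hypothetical names, while
the API calls shown are the ones the series introduces or retargets::

    static DEFINE_SPINLOCK(foo_lock);

    /* The seqcount is now typed against its associated writer lock. */
    static seqcount_spinlock_t foo_seq =
            SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
    static u64 foo_data;

    static void foo_update(u64 v)
    {
            spin_lock(&foo_lock);
            /* With lockdep enabled, asserts that foo_lock is held. */
            write_seqcount_begin(&foo_seq);
            foo_data = v;
            write_seqcount_end(&foo_seq);
            spin_unlock(&foo_lock);
    }

    static u64 foo_read(void)
    {
            unsigned int seq;
            u64 v;

            do {
                    seq = read_seqcount_begin(&foo_seq);
                    v = foo_data;
            } while (read_seqcount_retry(&foo_seq, seq));

            return v;
    }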
@@ -87,6 +87,58 @@ Read path::
	} while (read_seqcount_retry(&foo_seqcount, seq));
 
+
+.. _seqcount_locktype_t:
+
+Sequence counters with associated locks (``seqcount_LOCKTYPE_t``)
+-----------------------------------------------------------------
+
+As discussed at :ref:`seqcount_t`, sequence count write side critical
+sections must be serialized and non-preemptible. This variant of
+sequence counters associate the lock used for writer serialization at
+initialization time, which enables lockdep to validate that the write
+side critical sections are properly serialized.
+
+This lock association is a NOOP if lockdep is disabled and has neither
+storage nor runtime overhead. If lockdep is enabled, the lock pointer is
+stored in struct seqcount and lockdep's "lock is held" assertions are
+injected at the beginning of the write side critical section to validate
+that it is properly protected.
+
+For lock types which do not implicitly disable preemption, preemption
+protection is enforced in the write side function.
+
+The following sequence counters with associated locks are defined:
+
+  - ``seqcount_spinlock_t``
+  - ``seqcount_raw_spinlock_t``
+  - ``seqcount_rwlock_t``
+  - ``seqcount_mutex_t``
+  - ``seqcount_ww_mutex_t``
+
+The plain seqcount read and write APIs branch out to the specific
+seqcount_LOCKTYPE_t implementation at compile-time. This avoids kernel
+API explosion per each new seqcount LOCKTYPE.
+
+Initialization (replace "LOCKTYPE" with one of the supported locks)::
+
+	/* dynamic */
+	seqcount_LOCKTYPE_t foo_seqcount;
+	seqcount_LOCKTYPE_init(&foo_seqcount, &lock);
+
+	/* static */
+	static seqcount_LOCKTYPE_t foo_seqcount =
+		SEQCNT_LOCKTYPE_ZERO(foo_seqcount, &lock);
+
+	/* C99 struct init */
+	struct {
+		.seq = SEQCNT_LOCKTYPE_ZERO(foo.seq, &lock),
+	} foo;
+
+Write path: same as in :ref:`seqcount_t`, while running from a context
+with the associated LOCKTYPE lock acquired.
+
+Read path: same as in :ref:`seqcount_t`.
 
 .. _seqlock_t:
 
 Sequential locks (``seqlock_t``)
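The compile-time branch-out mentioned in the hunk above is plain C11
_Generic selection. A rough standalone illustration — not kernel code; the
toy types and helper names here are invented for the demo::

    #include <stdio.h>

    /* Toy stand-ins for seqcount_t and one associated-lock variant. */
    typedef struct { unsigned sequence; } seqcount;
    typedef struct { seqcount s; void *lock; } seqcount_spinlock;

    static unsigned begin_plain(seqcount *s)             { return s->sequence; }
    static unsigned begin_spinlock(seqcount_spinlock *s) { return s->s.sequence; }

    /* Like __seqprop(): pick the helper from the operand's static type. */
    #define read_begin(s) _Generic(*(s),                \
            seqcount:          begin_plain,             \
            seqcount_spinlock: begin_spinlock)(s)

    int main(void)
    {
            seqcount a = { 2 };
            seqcount_spinlock b = { { 4 }, 0 };

            /* One macro, two types: dispatch is resolved at compile time. */
            printf("%u %u\n", read_begin(&a), read_begin(&b)); /* prints: 2 4 */
            return 0;
    }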
@@ -18,7 +18,6 @@
 #include <linux/bitops.h>
 #include <linux/irqreturn.h>
 
-#include <asm/io.h>
 #include <asm/param.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
@@ -44,11 +43,6 @@ ia64_get_lid (void)
 
 #ifdef CONFIG_SMP
 
-#define XTP_OFFSET		0x1e0008
-
-#define SMP_IRQ_REDIRECTION	(1 << 0)
-#define SMP_IPI_REDIRECTION	(1 << 1)
-
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern struct smp_boot_data {
@@ -62,7 +56,6 @@ extern cpumask_t cpu_core_map[NR_CPUS];
 DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
 extern void __iomem *ipi_base_addr;
-extern unsigned char smp_int_redirect;
 
 extern volatile int ia64_cpu_to_sapicid[];
 #define cpu_physical_id(i)	ia64_cpu_to_sapicid[i]
@@ -84,34 +77,6 @@ cpu_logical_id (int cpuid)
	return i;
 }
 
-/*
- * XTP control functions:
- *	min_xtp   : route all interrupts to this CPU
- *	normal_xtp: nominal XTP value
- *	max_xtp   : never deliver interrupts to this CPU.
- */
-
-static inline void
-min_xtp (void)
-{
-	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-		writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
-}
-
-static inline void
-normal_xtp (void)
-{
-	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-		writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
-}
-
-static inline void
-max_xtp (void)
-{
-	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-		writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
-}
-
 /* Upping and downing of CPUs */
 extern int __cpu_disable (void);
 extern void __cpu_die (unsigned int cpu);
--- /dev/null
+++ b/arch/ia64/include/asm/xtp.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_XTP_H
+#define _ASM_IA64_XTP_H
+
+#include <asm/io.h>
+
+#ifdef CONFIG_SMP
+
+#define XTP_OFFSET		0x1e0008
+
+#define SMP_IRQ_REDIRECTION	(1 << 0)
+#define SMP_IPI_REDIRECTION	(1 << 1)
+
+extern unsigned char smp_int_redirect;
+
+/*
+ * XTP control functions:
+ *	min_xtp   : route all interrupts to this CPU
+ *	normal_xtp: nominal XTP value
+ *	max_xtp   : never deliver interrupts to this CPU.
+ */
+
+static inline void
+min_xtp (void)
+{
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
+}
+
+static inline void
+normal_xtp (void)
+{
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
+}
+
+static inline void
+max_xtp (void)
+{
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
+}
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_IA64_XTP_H */
@@ -95,6 +95,7 @@
 #include <asm/iosapic.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
+#include <asm/xtp.h>
 
 #undef DEBUG_INTERRUPT_ROUTING
 
@@ -25,6 +25,7 @@
 #include <linux/kernel_stat.h>
 
 #include <asm/mca.h>
+#include <asm/xtp.h>
 
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
@@ -47,6 +47,7 @@
 #include <linux/uaccess.h>
 #include <asm/unwind.h>
 #include <asm/user.h>
+#include <asm/xtp.h>
 
 #include "entry.h"
 
@@ -18,6 +18,7 @@
 #include <asm/page.h>
 #include <asm/sal.h>
 #include <asm/pal.h>
+#include <asm/xtp.h>
 
 __cacheline_aligned DEFINE_SPINLOCK(sal_lock);
 unsigned long sal_platform_features;
@@ -65,6 +65,7 @@
 #include <asm/tlbflush.h>
 #include <asm/unistd.h>
 #include <asm/uv/uv.h>
+#include <asm/xtp.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
@@ -45,6 +45,7 @@
 #include <asm/tlbflush.h>
 #include <asm/unistd.h>
 #include <asm/mca.h>
+#include <asm/xtp.h>
 
 /*
  * Note: alignment of 4 entries/cacheline was empirically determined
@@ -7,6 +7,7 @@
 #ifndef _ASMPARISC_TIMEX_H
 #define _ASMPARISC_TIMEX_H
 
+#include <asm/special_insns.h>
 
 #define CLOCK_TICK_RATE	1193180 /* Underlying HZ */
 
@@ -17,6 +17,7 @@
 #include <asm/cache.h>
 #include <asm/addrspace.h>
 #include <asm/machvec.h>
+#include <asm/page.h>
 #include <linux/pgtable.h>
 #include <asm-generic/iomap.h>
 
@@ -15,6 +15,7 @@
 #include <asm/setup.h>
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/processor.h>
 
 #define MV_NAME_SIZE 32
 
@@ -7,6 +7,7 @@
 #ifndef _SPARC64_TIMER_H
 #define _SPARC64_TIMER_H
 
+#include <uapi/asm/asi.h>
 #include <linux/types.h>
 #include <linux/init.h>
 
@@ -6,7 +6,8 @@
 #define _ASM_SPARC_VVAR_DATA_H
 
 #include <asm/clocksource.h>
-#include <linux/seqlock.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
 #include <linux/time.h>
 #include <linux/types.h>
 
@@ -7,7 +7,6 @@
  * a different vsyscall implementation for Linux/IA32 and for the name.
  */
 
-#include <linux/seqlock.h>
 #include <linux/time.h>
 #include <linux/timekeeper_internal.h>
 
@@ -26,9 +26,9 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
-#include <asm/acpi.h>
 #include <asm/apicdef.h>
 #include <asm/page.h>
+#include <asm/pgtable_types.h>
 #ifdef CONFIG_X86_32
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
@@ -5,16 +5,6 @@
 #include <linux/cpumask.h>
 #include <asm/percpu.h>
 
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-# include <asm/mpspec.h>
-# include <asm/apic.h>
-# ifdef CONFIG_X86_IO_APIC
-#  include <asm/io_apic.h>
-# endif
-#endif
 #include <asm/thread_info.h>
 #include <asm/cpumask.h>
 
@@ -6,6 +6,7 @@
 #define _ASM_X86_TSC_H
 
 #include <asm/processor.h>
+#include <asm/cpufeature.h>
 
 /*
  * Standard way to access the cycle counter.
@@ -46,6 +46,7 @@
 #include <asm/proto.h>
 #include <asm/traps.h>
 #include <asm/apic.h>
+#include <asm/acpi.h>
 #include <asm/io_apic.h>
 #include <asm/desc.h>
 #include <asm/hpet.h>
@@ -10,6 +10,7 @@
 * like self-ipi, etc...
 */
 #include <linux/cpumask.h>
+#include <linux/thread_info.h>
 
 #include <asm/apic.h>
 
@@ -9,6 +9,7 @@
 #include <linux/smp.h>
 
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 
 #include "local.h"
 
@@ -9,6 +9,7 @@
 * Bits copied from original nmi.c file
 *
 */
+#include <linux/thread_info.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
 
@@ -2,6 +2,7 @@
 
 #include <linux/cpumask.h>
 #include <linux/smp.h>
+#include <asm/io_apic.h>
 
 #include "local.h"
 
@@ -10,6 +10,7 @@
 
 #include <linux/jump_label.h>
 
+#include <asm/irq_vectors.h>
 #include <asm/apic.h>
 
 /* APIC flat 64 */
@@ -10,6 +10,7 @@
 #include <linux/errno.h>
 #include <linux/smp.h>
 
+#include <asm/io_apic.h>
 #include <asm/apic.h>
 #include <asm/acpi.h>
 
@@ -8,6 +8,7 @@
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
+#include <linux/thread_info.h>
 #include <asm/apic.h>
 
 #include "local.h"
@@ -15,6 +15,7 @@
 #include <asm/cpu.h>
 #include <asm/spec-ctrl.h>
 #include <asm/smp.h>
+#include <asm/numa.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
 #include <asm/debugreg.h>
@@ -45,6 +45,7 @@
 #include <asm/mtrr.h>
 #include <asm/hwcap2.h>
 #include <linux/numa.h>
+#include <asm/numa.h>
 #include <asm/asm.h>
 #include <asm/bugs.h>
 #include <asm/cpu.h>
@@ -10,6 +10,7 @@
 
 #include <asm/cpu.h>
 #include <asm/smp.h>
+#include <asm/numa.h>
 #include <asm/cacheinfo.h>
 #include <asm/spec-ctrl.h>
 #include <asm/delay.h>
@@ -23,6 +23,7 @@
 #include <asm/cmdline.h>
 #include <asm/traps.h>
 #include <asm/resctrl.h>
+#include <asm/numa.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -20,6 +20,7 @@
 #include <asm/irqdomain.h>
 #include <asm/hpet.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 #include <asm/pci_x86.h>
 #include <asm/setup.h>
 #include <asm/i8259.h>
@@ -22,6 +22,8 @@
 #include <asm/timer.h>
 #include <asm/hw_irq.h>
 #include <asm/desc.h>
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
 #include <asm/apic.h>
 #include <asm/setup.h>
 #include <asm/i8259.h>
@@ -13,6 +13,8 @@
 #include <linux/reboot.h>
 #include <linux/serial_8250.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
 #include <asm/cpu.h>
 #include <asm/hypervisor.h>
 #include <asm/i8259.h>
@@ -19,6 +19,8 @@
 #include <linux/smp.h>
 #include <linux/pci.h>
 
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
 #include <asm/irqdomain.h>
 #include <asm/mtrr.h>
 #include <asm/mpspec.h>
@@ -25,6 +25,7 @@
 #include <xen/xen.h>
 
 #include <asm/apic.h>
+#include <asm/numa.h>
 #include <asm/bios_ebda.h>
 #include <asm/bugs.h>
 #include <asm/cpu.h>
@@ -31,6 +31,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <asm/io_apic.h>
 #include <asm/cpu.h>
 
 static DEFINE_PER_CPU(struct x86_cpu, cpu_devices);
@@ -7,6 +7,7 @@
 */
 
 #include <linux/kernel.h>
+#include <linux/thread_info.h>
 
 #include <asm/apic.h>
 #include <asm/cpu_device_id.h>
@@ -52,6 +52,7 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/init.h>
 #include <asm/pgtable_areas.h>
+#include <asm/numa.h>
 
 #include "mm_internal.h"
 
@@ -1,8 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/init.h>
+#include <linux/thread_info.h>
 
 #include <asm/x86_init.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 #include <asm/xen/hypercall.h>
 
 #include <xen/xen.h>
@@ -11,6 +11,7 @@
 
 #include <asm/cpu.h>
 #include <asm/smp.h>
+#include <asm/io_apic.h>
 #include <asm/reboot.h>
 #include <asm/setup.h>
 #include <asm/idtentry.h>
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/thread_info.h>
 #include <asm/smp.h>
 
 #include <xen/events.h>
@@ -29,6 +29,7 @@
 #include <asm/idtentry.h>
 #include <asm/desc.h>
 #include <asm/cpu.h>
+#include <asm/io_apic.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/vcpu.h>
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/types.h>
 
-#include <asm/fixmap.h>
-
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 
+#include <asm/fixmap.h>
+
 #include "xen-ops.h"
 
 void xen_pv_pre_suspend(void)
@@ -406,7 +406,7 @@ struct ioc {
	enum ioc_running		running;
	atomic64_t			vtime_rate;
 
-	seqcount_t			period_seqcount;
+	seqcount_spinlock_t		period_seqcount;
	u32				period_at;	/* wallclock starttime */
	u64				period_at_vtime; /* vtime starttime */
 
@@ -873,7 +873,6 @@ static void ioc_now(struct ioc *ioc, struct ioc_now *now)
 
 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
 {
-	lockdep_assert_held(&ioc->lock);
	WARN_ON_ONCE(ioc->running != IOC_RUNNING);
 
	write_seqcount_begin(&ioc->period_seqcount);
@@ -2001,7 +2000,7 @@ static int blk_iocost_init(struct request_queue *q)
 
	ioc->running = IOC_IDLE;
	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
-	seqcount_init(&ioc->period_seqcount);
+	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
	ioc->period_at = ktime_to_us(ktime_get());
	atomic64_set(&ioc->cur_period, 0);
	atomic_set(&ioc->hweight_gen, 0);
@@ -52,12 +52,6 @@
 DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
-struct lock_class_key reservation_seqcount_class;
-EXPORT_SYMBOL(reservation_seqcount_class);
-
-const char reservation_seqcount_string[] = "reservation_seqcount";
-EXPORT_SYMBOL(reservation_seqcount_string);
-
 /**
  * dma_resv_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
@@ -143,9 +137,8 @@ subsys_initcall(dma_resv_lockdep);
 void dma_resv_init(struct dma_resv *obj)
 {
	ww_mutex_init(&obj->lock, &reservation_ww_class);
+	seqcount_ww_mutex_init(&obj->seq, &obj->lock);
 
-	__seqcount_init(&obj->seq, reservation_seqcount_string,
-			&reservation_seqcount_class);
	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
 }
@@ -275,7 +268,6 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;
 
-	preempt_disable();
	write_seqcount_begin(&obj->seq);
 
	for (i = 0; i < count; ++i) {
@@ -297,7 +289,6 @@ replace:
	smp_store_mb(fobj->shared_count, count);
 
	write_seqcount_end(&obj->seq);
-	preempt_enable();
	dma_fence_put(old);
 }
 EXPORT_SYMBOL(dma_resv_add_shared_fence);
@@ -324,14 +315,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
	if (fence)
		dma_fence_get(fence);
 
-	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
-	preempt_enable();
 
	/* inplace update, no shared fences */
	while (i--)
@@ -409,13 +398,11 @@ retry:
	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);
 
-	preempt_disable();
	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);
-	preempt_enable();
 
	dma_resv_list_free(src_list);
	dma_fence_put(old);
@@ -258,11 +258,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
	new->shared_count = k;
 
	/* Install the new fence list, seqcount provides the barriers */
-	preempt_disable();
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);
-	preempt_enable();
 
	/* Drop the references to the removed fences or move them to ef_list */
	for (i = j, k = 0; i < old->shared_count; ++i) {
@@ -15,6 +15,7 @@
 #include <linux/irqdomain.h>
 #include <linux/crash_dump.h>
 #include <asm/io_apic.h>
+#include <asm/apic.h>
 #include <asm/smp.h>
 #include <asm/cpu.h>
 #include <asm/irq_remapping.h>
@@ -7019,7 +7019,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
	} else
		goto abort;
	spin_lock_init(&conf->device_lock);
-	seqcount_init(&conf->gen_lock);
+	seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock);
	mutex_init(&conf->cache_size_mutex);
	init_waitqueue_head(&conf->wait_for_quiescent);
	init_waitqueue_head(&conf->wait_for_stripe);
@@ -582,7 +582,7 @@ struct r5conf {
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation; /* increments with every reshape */
-	seqcount_t		gen_lock;   /* lock against generation changes */
+	seqcount_spinlock_t	gen_lock;   /* lock against generation changes */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */
	long long		min_offset_diff; /* minimum difference between
@@ -1746,7 +1746,7 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
-	seqcount_init(&dentry->d_seq);
+	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
@@ -117,7 +117,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
	fs->users = 1;
	fs->in_exec = 0;
	spin_lock_init(&fs->lock);
-	seqcount_init(&fs->seq);
+	seqcount_spinlock_init(&fs->seq, &fs->lock);
	fs->umask = old->umask;
 
	spin_lock(&old->lock);
@@ -163,6 +163,6 @@ EXPORT_SYMBOL(current_umask);
 struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
-	.seq		= SEQCNT_ZERO(init_fs.seq),
+	.seq		= SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
	.umask		= 0022,
 };
@@ -117,7 +117,7 @@ struct nfs4_state_owner {
	unsigned long	     so_flags;
	struct list_head     so_states;
	struct nfs_seqid_counter so_seqid;
-	seqcount_t	     so_reclaim_seqcount;
+	seqcount_spinlock_t  so_reclaim_seqcount;
	struct mutex	     so_delegreturn_mutex;
 };
 
@@ -509,7 +509,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
	nfs4_init_seqid_counter(&sp->so_seqid);
	atomic_set(&sp->so_count, 1);
	INIT_LIST_HEAD(&sp->so_lru);
-	seqcount_init(&sp->so_reclaim_seqcount);
+	seqcount_spinlock_init(&sp->so_reclaim_seqcount, &sp->so_lock);
	mutex_init(&sp->so_delegreturn_mutex);
	return sp;
 }
@@ -61,7 +61,7 @@ struct userfaultfd_ctx {
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
-	struct seqcount refile_seq;
+	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
@@ -1961,7 +1961,7 @@ static void init_once_userfaultfd_ctx(void *mem)
	init_waitqueue_head(&ctx->fault_wqh);
	init_waitqueue_head(&ctx->event_wqh);
	init_waitqueue_head(&ctx->fd_wqh);
-	seqcount_init(&ctx->refile_seq);
+	seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
 }
 
 SYSCALL_DEFINE1(userfaultfd, int, flags)
@@ -89,7 +89,7 @@ extern struct dentry_stat_t dentry_stat;
 struct dentry {
	/* RCU lookup touched fields */
	unsigned int d_flags;		/* protected by d_lock */
-	seqcount_t d_seq;		/* per dentry seqlock */
+	seqcount_spinlock_t d_seq;	/* per dentry seqlock */
	struct hlist_bl_node d_hash;	/* lookup hash list */
	struct dentry *d_parent;	/* parent directory */
	struct qstr d_name;
@@ -46,8 +46,6 @@
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
 
 /**
  * struct dma_resv_list - a list of shared fences
@@ -71,7 +69,7 @@ struct dma_resv_list {
  */
 struct dma_resv {
	struct ww_mutex lock;
-	seqcount_t seq;
+	seqcount_ww_mutex_t seq;
 
	struct dma_fence __rcu *fence_excl;
	struct dma_resv_list __rcu *fence;
@@ -38,6 +38,8 @@
 
 #ifdef __KERNEL__
 
+#include <asm/bug.h>
+
 struct dql {
	/* Fields accessed in enqueue path (dql_queued) */
	unsigned int	num_queued;		/* Total ever queued */
@@ -9,7 +9,7 @@
 struct fs_struct {
	int users;
	spinlock_t lock;
-	seqcount_t seq;
+	seqcount_spinlock_t seq;
	int umask;
	int in_exec;
	struct path root, pwd;
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/percpu.h>
+#include <linux/seqlock.h>
 #include <linux/timer.h>
 #include <linux/timerqueue.h>
 
@@ -159,7 +160,7 @@ struct hrtimer_clock_base {
	struct hrtimer_cpu_base	*cpu_base;
	unsigned int		index;
	clockid_t		clockid;
-	seqcount_t		seq;
+	seqcount_raw_spinlock_t	seq;
	struct hrtimer		*running;
	struct timerqueue_head	active;
	ktime_t			(*get_time)(void);
@@ -23,6 +23,7 @@
 
 #include <linux/time.h>
 #include <linux/jiffies.h>
+#include <asm/bug.h>
 
 /* Nanosecond scalar representation for kernel time values */
 typedef s64	ktime_t;
@@ -42,7 +42,7 @@ struct kvm_kernel_irqfd {
	wait_queue_entry_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry irq_entry;
-	seqcount_t irq_entry_sc;
+	seqcount_spinlock_t irq_entry_sc;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
@@ -11,6 +11,7 @@
 #define __LINUX_LOCKDEP_H
 
 #include <linux/lockdep_types.h>
+#include <linux/smp.h>
 #include <asm/percpu.h>
 
 struct task_struct;
@@ -65,6 +65,17 @@ struct mutex {
 #endif
 };
 
+struct ww_class;
+struct ww_acquire_ctx;
+
+struct ww_mutex {
+	struct mutex base;
+	struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct ww_class *ww_class;
+#endif
+};
+
 /*
  * This is the control structure for tasks blocked on mutex,
  * which resides on the blocked task's kernel stack:
@@ -32,6 +32,7 @@
 #include <linux/task_io_accounting.h>
 #include <linux/posix-timers.h>
 #include <linux/rseq.h>
+#include <linux/seqlock.h>
 #include <linux/kcsan.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
@@ -1049,7 +1050,7 @@ struct task_struct {
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Seqence number to catch updates: */
-	seqcount_t			mems_allowed_seq;
+	seqcount_spinlock_t		mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
 #endif
@ -10,13 +10,16 @@
|
|||||||
*
|
*
|
||||||
* Copyrights:
|
* Copyrights:
|
||||||
* - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
|
* - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
|
||||||
|
* - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/spinlock.h>
|
|
||||||
#include <linux/preempt.h>
|
|
||||||
#include <linux/lockdep.h>
|
|
||||||
#include <linux/compiler.h>
|
#include <linux/compiler.h>
|
||||||
#include <linux/kcsan-checks.h>
|
#include <linux/kcsan-checks.h>
|
||||||
|
#include <linux/lockdep.h>
|
||||||
|
#include <linux/mutex.h>
|
||||||
|
#include <linux/preempt.h>
|
||||||
|
#include <linux/spinlock.h>
|
||||||
|
|
||||||
#include <asm/processor.h>
|
#include <asm/processor.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -48,6 +51,10 @@
|
|||||||
* This mechanism can't be used if the protected data contains pointers,
|
* This mechanism can't be used if the protected data contains pointers,
|
||||||
* as the writer can invalidate a pointer that a reader is following.
|
* as the writer can invalidate a pointer that a reader is following.
|
||||||
*
|
*
|
||||||
|
* If the write serialization mechanism is one of the common kernel
|
||||||
|
* locking primitives, use a sequence counter with associated lock
|
||||||
|
* (seqcount_LOCKTYPE_t) instead.
|
||||||
|
*
|
||||||
* If it's desired to automatically handle the sequence counter writer
|
* If it's desired to automatically handle the sequence counter writer
|
||||||
* serialization and non-preemptibility requirements, use a sequential
|
* serialization and non-preemptibility requirements, use a sequential
|
||||||
* lock (seqlock_t) instead.
|
* lock (seqlock_t) instead.
|
||||||
@ -72,17 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
|
|||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||||
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
|
|
||||||
.dep_map = { .name = #lockname } \
|
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
|
||||||
|
.dep_map = { .name = #lockname }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* seqcount_init() - runtime initializer for seqcount_t
|
* seqcount_init() - runtime initializer for seqcount_t
|
||||||
* @s: Pointer to the seqcount_t instance
|
* @s: Pointer to the seqcount_t instance
|
||||||
*/
|
*/
|
||||||
# define seqcount_init(s) \
|
# define seqcount_init(s) \
|
||||||
do { \
|
do { \
|
||||||
static struct lock_class_key __key; \
|
static struct lock_class_key __key; \
|
||||||
__seqcount_init((s), #s, &__key); \
|
__seqcount_init((s), #s, &__key); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
|
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
|
||||||
@ -108,9 +116,143 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
|
|||||||
*/
|
*/
|
||||||
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
|
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Sequence counters with associated locks (seqcount_LOCKTYPE_t)
|
||||||
|
*
|
||||||
|
* A sequence counter which associates the lock used for writer
|
||||||
|
* serialization at initialization time. This enables lockdep to validate
|
||||||
|
* that the write side critical section is properly serialized.
|
||||||
|
*
|
||||||
|
* For associated locks which do not implicitly disable preemption,
|
||||||
|
* preemption protection is enforced in the write side function.
|
||||||
|
*
|
||||||
|
* Lockdep is never used in any for the raw write variants.
|
||||||
|
*
|
||||||
|
* See Documentation/locking/seqlock.rst
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifdef CONFIG_LOCKDEP
|
||||||
|
#define __SEQ_LOCK(expr) expr
|
||||||
|
#else
|
||||||
|
#define __SEQ_LOCK(expr)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/**
|
||||||
|
* typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPR associated
|
||||||
|
* @seqcount: The real sequence counter
|
||||||
|
* @lock: Pointer to the associated spinlock
|
||||||
|
*
|
||||||
|
* A plain sequence counter with external writer synchronization by a
|
||||||
|
* spinlock. The spinlock is associated to the sequence count in the
|
||||||
|
* static initializer or init function. This enables lockdep to validate
|
||||||
|
* that the write side critical section is properly serialized.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
|
||||||
|
* @s: Pointer to the seqcount_LOCKNAME_t instance
|
||||||
|
* @lock: Pointer to the associated LOCKTYPE
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
|
||||||
|
* @locktype: actual typename
|
||||||
|
* @lockname: name
|
||||||
|
* @preemptible: preemptibility of above locktype
|
||||||
|
* @lockmember: argument for lockdep_assert_held()
|
||||||
|
*/
|
||||||
|
#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
|
||||||
|
typedef struct seqcount_##lockname { \
|
||||||
|
seqcount_t seqcount; \
|
||||||
|
__SEQ_LOCK(locktype *lock); \
|
||||||
|
} seqcount_##lockname##_t; \
|
||||||
|
\
|
||||||
|
static __always_inline void \
|
||||||
|
seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
|
||||||
|
{ \
|
||||||
|
seqcount_init(&s->seqcount); \
|
||||||
|
__SEQ_LOCK(s->lock = lock); \
|
||||||
|
} \
|
||||||
|
\
|
||||||
|
static __always_inline seqcount_t * \
|
||||||
|
__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
|
||||||
|
{ \
|
||||||
|
return &s->seqcount; \
|
||||||
|
} \
|
||||||
|
\
|
||||||
|
static __always_inline bool \
|
||||||
|
__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \
|
||||||
|
{ \
|
||||||
|
return preemptible; \
|
||||||
|
} \
|
||||||
|
\
|
||||||
|
static __always_inline void \
|
||||||
|
__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
|
||||||
|
{ \
|
||||||
|
__SEQ_LOCK(lockdep_assert_held(lockmember)); \
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* __seqprop() for seqcount_t
|
||||||
|
*/
|
||||||
|
|
||||||
|
static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
|
||||||
|
{
|
||||||
|
return s;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline bool __seqcount_preemptible(seqcount_t *s)
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void __seqcount_assert(seqcount_t *s)
|
||||||
|
{
|
||||||
|
lockdep_assert_preemption_disabled();
|
||||||
|
}
|
||||||
|
|
||||||
|
SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock)
|
||||||
|
SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock)
|
||||||
|
SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock)
|
||||||
|
SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
|
||||||
|
SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
|
||||||
|
* @name: Name of the seqcount_LOCKNAME_t instance
|
||||||
|
* @lock: Pointer to the associated LOCKTYPE
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \
|
||||||
|
.seqcount = SEQCNT_ZERO(seq_name.seqcount), \
|
||||||
|
__SEQ_LOCK(.lock = (assoc_lock)) \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
|
||||||
|
#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
|
||||||
|
#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
|
||||||
|
#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
|
||||||
|
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
|
||||||
|
|
||||||
|
|
||||||
|
#define __seqprop_case(s, lockname, prop) \
|
||||||
|
seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
|
||||||
|
|
||||||
|
#define __seqprop(s, prop) _Generic(*(s), \
|
||||||
|
seqcount_t: __seqcount_##prop((void *)(s)), \
|
||||||
|
__seqprop_case((s), raw_spinlock, prop), \
|
||||||
|
__seqprop_case((s), spinlock, prop), \
|
||||||
|
__seqprop_case((s), rwlock, prop), \
|
||||||
|
__seqprop_case((s), mutex, prop), \
|
||||||
|
__seqprop_case((s), ww_mutex, prop))
|
||||||
|
|
||||||
|
#define __seqcount_ptr(s) __seqprop(s, ptr)
|
||||||
|
#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
|
||||||
|
#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
|
||||||
|
|
 /**
  * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
  * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -122,7 +264,10 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned __read_seqcount_begin(const seqcount_t *s)
+#define __read_seqcount_begin(s)					\
+	__read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
 {
 	unsigned ret;
 
@@ -138,32 +283,38 @@ repeat:
 
 /**
  * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
+#define raw_read_seqcount_begin(s)					\
+	raw_read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
 {
-	unsigned ret = __read_seqcount_begin(s);
+	unsigned ret = __read_seqcount_t_begin(s);
 	smp_rmb();
 	return ret;
 }
 
 /**
  * read_seqcount_begin() - begin a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
+#define read_seqcount_begin(s)						\
+	read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
 {
 	seqcount_lockdep_reader_access(s);
-	return raw_read_seqcount_begin(s);
+	return raw_read_seqcount_t_begin(s);
 }
 
 /**
  * raw_read_seqcount() - read the raw seqcount_t counter value
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * raw_read_seqcount opens a read critical section of the given
  * seqcount_t, without any lockdep checking, and without checking or
@@ -172,7 +323,10 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned raw_read_seqcount(const seqcount_t *s)
+#define raw_read_seqcount(s)						\
+	raw_read_seqcount_t(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
 {
 	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
@@ -183,7 +337,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
 /**
  * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
  *                        lockdep and w/o counter stabilization
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * raw_seqcount_begin opens a read critical section of the given
  * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
@@ -197,18 +351,21 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+#define raw_seqcount_begin(s)						\
+	raw_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
 {
 	/*
 	 * If the counter is odd, let read_seqcount_retry() fail
 	 * by decrementing the counter.
 	 */
-	return raw_read_seqcount(s) & ~1;
+	return raw_read_seqcount_t(s) & ~1;
 }
 
 /**
  * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  * @start: count, from read_seqcount_begin()
  *
  * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
@@ -221,7 +378,10 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define __read_seqcount_retry(s, start)					\
+	__read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
 {
 	kcsan_atomic_next(0);
 	return unlikely(READ_ONCE(s->sequence) != start);
@@ -229,7 +389,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 
 /**
  * read_seqcount_retry() - end a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  * @start: count, from read_seqcount_begin()
  *
  * read_seqcount_retry closes the read critical section of given
@@ -238,17 +398,28 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define read_seqcount_retry(s, start)					\
+	read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-	return __read_seqcount_retry(s, start);
+	return __read_seqcount_t_retry(s, start);
 }
 
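To make the reader side above concrete, a minimal sketch (not from the patch) of the canonical begin/retry loop, reusing the hypothetical demo_seq and a made-up demo_value:

    /* Hypothetical reader: loop until a consistent snapshot is observed. */
    static u64 demo_value;

    static u64 demo_read(void)
    {
    	unsigned int seq;
    	u64 val;

    	do {
    		seq = read_seqcount_begin(&demo_seq);
    		val = demo_value;		/* the protected data */
    	} while (read_seqcount_retry(&demo_seq, seq));

    	return val;
    }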
 /**
  * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  */
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+#define raw_write_seqcount_begin(s)					\
+do {									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_disable();					\
+									\
+	raw_write_seqcount_t_begin(__seqcount_ptr(s));			\
+} while (0)
+
+static inline void raw_write_seqcount_t_begin(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -257,49 +428,50 @@ static inline void raw_write_seqcount_begin(seqcount_t *s)
 
 /**
  * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  */
-static inline void raw_write_seqcount_end(seqcount_t *s)
+#define raw_write_seqcount_end(s)					\
+do {									\
+	raw_write_seqcount_t_end(__seqcount_ptr(s));			\
+									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_enable();					\
+} while (0)
+
+static inline void raw_write_seqcount_t_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
 	kcsan_nestable_atomic_end();
 }
 
-static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
-	raw_write_seqcount_begin(s);
-	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
-}
-
 /**
  * write_seqcount_begin_nested() - start a seqcount_t write section with
  *                                 custom lockdep nesting level
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  * @subclass: lockdep nesting level
  *
  * See Documentation/locking/lockdep-design.rst
  */
-static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
-	lockdep_assert_preemption_disabled();
-	__write_seqcount_begin_nested(s, subclass);
-}
+#define write_seqcount_begin_nested(s, subclass)			\
+do {									\
+	__seqcount_assert_lock_held(s);					\
+									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_disable();					\
+									\
+	write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass);	\
+} while (0)
 
-/*
- * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
- *
- * Use for internal seqlock.h code where it's known that preemption is
- * already disabled. For example, seqlock_t write side functions.
- */
-static inline void __write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
 {
-	__write_seqcount_begin_nested(s, 0);
+	raw_write_seqcount_t_begin(s);
+	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
 }
 
 /**
  * write_seqcount_begin() - start a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * write_seqcount_begin opens a write side critical section of the given
  * seqcount_t.
@@ -308,26 +480,44 @@ static inline void __write_seqcount_begin(seqcount_t *s)
  * non-preemptible. If readers can be invoked from hardirq or softirq
  * context, interrupts or bottom halves must be respectively disabled.
  */
-static inline void write_seqcount_begin(seqcount_t *s)
+#define write_seqcount_begin(s)						\
+do {									\
+	__seqcount_assert_lock_held(s);					\
+									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_disable();					\
+									\
+	write_seqcount_t_begin(__seqcount_ptr(s));			\
+} while (0)
+
+static inline void write_seqcount_t_begin(seqcount_t *s)
 {
-	write_seqcount_begin_nested(s, 0);
+	write_seqcount_t_begin_nested(s, 0);
 }
 
 /**
  * write_seqcount_end() - end a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * The write section must've been opened with write_seqcount_begin().
  */
-static inline void write_seqcount_end(seqcount_t *s)
+#define write_seqcount_end(s)						\
+do {									\
+	write_seqcount_t_end(__seqcount_ptr(s));			\
+									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_enable();					\
+} while (0)
+
+static inline void write_seqcount_t_end(seqcount_t *s)
 {
 	seqcount_release(&s->dep_map, _RET_IP_);
-	raw_write_seqcount_end(s);
+	raw_write_seqcount_t_end(s);
 }
 
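The write side pairs with the associated lock; a matching hypothetical writer sketch (again not from the patch), where holding demo_lock is what the new __seqcount_assert_lock_held() check validates under lockdep:

    /* Hypothetical writer: the associated lock must be held. For a
     * preemptible lock type (e.g. a mutex), write_seqcount_begin()
     * additionally disables preemption around the write section.
     */
    static void demo_write(u64 val)
    {
    	spin_lock(&demo_lock);
    	write_seqcount_begin(&demo_seq);
    	demo_value = val;
    	write_seqcount_end(&demo_seq);
    	spin_unlock(&demo_lock);
    }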
 /**
  * raw_write_seqcount_barrier() - do a seqcount_t write barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * This can be used to provide an ordering guarantee instead of the usual
  * consistency guarantee. It is one wmb cheaper, because it can collapse
@@ -366,7 +556,10 @@ static inline void write_seqcount_end(seqcount_t *s)
  *      WRITE_ONCE(X, false);
  * }
  */
-static inline void raw_write_seqcount_barrier(seqcount_t *s)
+#define raw_write_seqcount_barrier(s)					\
+	raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -378,12 +571,15 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 /**
  * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
  *                               side operations
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * After write_seqcount_invalidate, no seqcount_t read side operations
  * will complete successfully and see data older than this.
  */
-static inline void write_seqcount_invalidate(seqcount_t *s)
+#define write_seqcount_invalidate(s)					\
+	write_seqcount_t_invalidate(__seqcount_ptr(s))
+
+static inline void write_seqcount_t_invalidate(seqcount_t *s)
 {
 	smp_wmb();
 	kcsan_nestable_atomic_begin();
@@ -393,7 +589,7 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
 
 /**
  * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * Use seqcount_t latching to switch between two storage places protected
  * by a sequence counter. Doing so allows having interruptible, preemptible,
@@ -406,7 +602,10 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
  * picking which data copy to read. The full counter value must then be
  * checked with read_seqcount_retry().
  */
-static inline int raw_read_seqcount_latch(seqcount_t *s)
+#define raw_read_seqcount_latch(s)					\
+	raw_read_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline int raw_read_seqcount_t_latch(seqcount_t *s)
 {
 	/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
 	int seq = READ_ONCE(s->sequence); /* ^^^ */
@@ -415,7 +614,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
 
 /**
  * raw_write_seqcount_latch() - redirect readers to even/odd copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * The latch technique is a multiversion concurrency control method that allows
  * queries during non-atomic modifications. If you can guarantee queries never
@@ -494,7 +693,10 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
  * When data is a dynamic data structure; one should use regular RCU
  * patterns to manage the lifetimes of the objects within.
  */
-static inline void raw_write_seqcount_latch(seqcount_t *s)
+#define raw_write_seqcount_latch(s)					\
+	raw_write_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_latch(seqcount_t *s)
 {
 	smp_wmb();	/* prior stores before incrementing "sequence" */
 	s->sequence++;
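A condensed sketch of the latch discipline described above (struct latch_struct and struct data are hypothetical, not part of the patch): the writer flips the counter before updating each copy, so readers, who pick a copy by the counter's low bit and re-check the full counter afterwards, never observe a copy that is being modified.

    /* Hypothetical latch user; both types are made up for illustration. */
    struct data {
    	unsigned long val;
    };

    struct latch_struct {
    	seqcount_t	seq;
    	struct data	data[2];
    };

    static void latch_modify(struct latch_struct *latch, const struct data *d)
    {
    	raw_write_seqcount_latch(&latch->seq);	/* readers now use data[1] */
    	latch->data[0] = *d;
    	raw_write_seqcount_latch(&latch->seq);	/* readers now use data[0] */
    	latch->data[1] = *d;
    }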
@@ -516,20 +718,20 @@ typedef struct {
 	spinlock_t lock;
 } seqlock_t;
 
 #define __SEQLOCK_UNLOCKED(lockname)					\
 	{								\
 		.seqcount = SEQCNT_ZERO(lockname),			\
 		.lock =	__SPIN_LOCK_UNLOCKED(lockname)			\
 	}
 
 /**
  * seqlock_init() - dynamic initializer for seqlock_t
  * @sl: Pointer to the seqlock_t instance
  */
 #define seqlock_init(sl)						\
 	do {								\
 		seqcount_init(&(sl)->seqcount);				\
 		spin_lock_init(&(sl)->lock);				\
 	} while (0)
 
 /**
@@ -592,7 +794,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_t_begin(&sl->seqcount);
 }
 
 /**
@@ -604,7 +806,7 @@ static inline void write_seqlock(seqlock_t *sl)
  */
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	write_seqcount_t_end(&sl->seqcount);
 	spin_unlock(&sl->lock);
 }
 
@@ -618,7 +820,7 @@ static inline void write_sequnlock(seqlock_t *sl)
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_t_begin(&sl->seqcount);
 }
 
 /**
@@ -631,7 +833,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
  */
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	write_seqcount_t_end(&sl->seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
@@ -645,7 +847,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_t_begin(&sl->seqcount);
 }
 
 /**
@@ -657,7 +859,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
  */
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	write_seqcount_t_end(&sl->seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@@ -666,7 +868,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_t_begin(&sl->seqcount);
 	return flags;
 }
 
@@ -695,13 +897,13 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_end(&sl->seqcount);
+	write_seqcount_t_end(&sl->seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
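For contrast with bare seqcounts, a small usage sketch of seqlock_t itself (hypothetical names, not from the patch): the embedded spinlock serializes writers, so no external lock or lockdep association is needed.

    static DEFINE_SEQLOCK(demo_seqlock);
    static u64 demo_stamp;

    static void demo_update(u64 v)
    {
    	write_seqlock(&demo_seqlock);	/* takes ->lock, opens write section */
    	demo_stamp = v;
    	write_sequnlock(&demo_seqlock);
    }

    static u64 demo_sample(void)
    {
    	unsigned int seq;
    	u64 v;

    	do {
    		seq = read_seqbegin(&demo_seqlock);
    		v = demo_stamp;
    	} while (read_seqretry(&demo_seqlock, seq));

    	return v;
    }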
 /**
  * read_seqlock_excl() - begin a seqlock_t locking reader section
  * @sl: Pointer to seqlock_t
  *
  * read_seqlock_excl opens a seqlock_t locking reader critical section. A
  * locking reader exclusively locks out *both* other writers *and* other
@@ -3,7 +3,6 @@
 #define _LINUX_TIME_H
 
 # include <linux/cache.h>
-# include <linux/seqlock.h>
 # include <linux/math64.h>
 # include <linux/time64.h>
 
@@ -57,6 +57,7 @@
 #define __LINUX_VIDEODEV2_H
 
 #include <linux/time.h> /* need struct timeval */
+#include <linux/kernel.h>
 #include <uapi/linux/videodev2.h>
 
 #endif /* __LINUX_VIDEODEV2_H */
@@ -48,14 +48,6 @@ struct ww_acquire_ctx {
 #endif
 };
 
-struct ww_mutex {
-	struct mutex base;
-	struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
-	struct ww_class *ww_class;
-#endif
-};
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
 		, .ww_class = class
@@ -298,7 +298,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize);
 
 extern struct hlist_nulls_head *nf_conntrack_hash;
 extern unsigned int nf_conntrack_htable_size;
-extern seqcount_t nf_conntrack_generation;
+extern seqcount_spinlock_t nf_conntrack_generation;
 extern unsigned int nf_conntrack_max;
 
 /* must be called with rcu read lock held */
@@ -154,7 +154,8 @@ struct task_struct init_task
 	.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
 #endif
 #ifdef CONFIG_CPUSETS
-	.mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
+	.mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
+						 &init_task.alloc_lock),
 #endif
 #ifdef CONFIG_RT_MUTEXES
 	.pi_waiters	= RB_ROOT_CACHED,
@@ -2011,7 +2011,7 @@ static __latent_entropy struct task_struct *copy_process(
 #ifdef CONFIG_CPUSETS
 	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
 	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
-	seqcount_init(&p->mems_allowed_seq);
+	seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	memset(&p->irqtrace, 0, sizeof(p->irqtrace));
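The conversions above all follow the same dynamic-initialization pattern; a generic sketch with a hypothetical struct demo (not from the patch):

    struct demo {
    	spinlock_t		lock;	/* serializes writers of ->seq */
    	seqcount_spinlock_t	seq;	/* associated with ->lock */
    };

    static void demo_init(struct demo *d)
    {
    	spin_lock_init(&d->lock);
    	seqcount_spinlock_init(&d->seq, &d->lock);
    }

With lockdep enabled, the stored lock pointer lets write_seqcount_begin(&d->seq) assert that d->lock is actually held.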
@@ -423,7 +423,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
 	seq_time(m, lt->min);
 	seq_time(m, lt->max);
 	seq_time(m, lt->total);
-	seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
+	seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
 }
 
 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
@@ -135,7 +135,11 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
  * timer->base->cpu_base
  */
 static struct hrtimer_cpu_base migration_cpu_base = {
-	.clock_base = { { .cpu_base = &migration_cpu_base, }, },
+	.clock_base = { {
+		.cpu_base = &migration_cpu_base,
+		.seq      = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
+						     &migration_cpu_base.lock),
+	}, },
 };
 
 #define migration_base	migration_cpu_base.clock_base[0]
@@ -1998,8 +2002,11 @@ int hrtimers_prepare_cpu(unsigned int cpu)
 	int i;
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		cpu_base->clock_base[i].cpu_base = cpu_base;
-		timerqueue_init_head(&cpu_base->clock_base[i].active);
+		struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
+
+		clock_b->cpu_base = cpu_base;
+		seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
+		timerqueue_init_head(&clock_b->active);
 	}
 
 	cpu_base->cpu = cpu;
@@ -39,18 +39,19 @@ enum timekeeping_adv_mode {
 	TK_ADV_FREQ
 };
 
+static DEFINE_RAW_SPINLOCK(timekeeper_lock);
+
 /*
  * The most important data for readout fits into a single 64 byte
  * cache line.
  */
 static struct {
-	seqcount_t		seq;
+	seqcount_raw_spinlock_t	seq;
 	struct timekeeper	timekeeper;
 } tk_core ____cacheline_aligned = {
-	.seq = SEQCNT_ZERO(tk_core.seq),
+	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
 };
 
-static DEFINE_RAW_SPINLOCK(timekeeper_lock);
 static struct timekeeper shadow_timekeeper;
 
 /**
@@ -63,7 +64,7 @@ static struct timekeeper shadow_timekeeper;
  * See @update_fast_timekeeper() below.
  */
 struct tk_fast {
-	seqcount_t		seq;
+	seqcount_raw_spinlock_t	seq;
 	struct tk_read_base	base[2];
 };
 
@@ -80,11 +81,13 @@ static struct clocksource dummy_clock = {
 };
 
 static struct tk_fast tk_fast_mono ____cacheline_aligned = {
+	.seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
 	.base[0] = { .clock = &dummy_clock, },
 	.base[1] = { .clock = &dummy_clock, },
 };
 
 static struct tk_fast tk_fast_raw ____cacheline_aligned = {
+	.seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
 	.base[0] = { .clock = &dummy_clock, },
 	.base[1] = { .clock = &dummy_clock, },
 };
@@ -157,7 +160,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
  * tk_clock_read - atomic clocksource read() helper
  *
  * This helper is necessary to use in the read paths because, while the
- * seqlock ensures we don't return a bad value while structures are updated,
+ * seqcount ensures we don't return a bad value while structures are updated,
  * it doesn't protect from potential crashes. There is the possibility that
  * the tkr's clocksource may change between the read reference, and the
  * clock reference passed to the read function. This can cause crashes if
@@ -222,10 +225,10 @@ static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 	unsigned int seq;
 
 	/*
-	 * Since we're called holding a seqlock, the data may shift
+	 * Since we're called holding a seqcount, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
-	 * results away. So nest another seqlock here to atomically
+	 * results away. So nest another seqcount here to atomically
	 * grab the points we are checking with.
	 */
 	do {
@@ -486,7 +489,7 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
  *
  * To keep it NMI safe since we're accessing from tracing, we're not using a
  * separate timekeeper with updates to monotonic clock and boot offset
- * protected with seqlocks. This has the following minor side effects:
+ * protected with seqcounts. This has the following minor side effects:
  *
  * (1) Its possible that a timestamp be taken after the boot offset is updated
  * but before the timekeeper is updated. If this happens, the new boot offset
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 
 unsigned int nf_conntrack_max __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_max);
-seqcount_t nf_conntrack_generation __read_mostly;
+seqcount_spinlock_t nf_conntrack_generation __read_mostly;
 static unsigned int nf_conntrack_hash_rnd __read_mostly;
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
@@ -2588,7 +2588,8 @@ int nf_conntrack_init_start(void)
 	/* struct nf_ct_ext uses u8 to store offsets/size */
 	BUILD_BUG_ON(total_extension_size() > 255u);
 
-	seqcount_init(&nf_conntrack_generation);
+	seqcount_spinlock_init(&nf_conntrack_generation,
+			       &nf_conntrack_locks_all_lock);
 
 	for (i = 0; i < CONNTRACK_LOCKS; i++)
 		spin_lock_init(&nf_conntrack_locks[i]);
@@ -18,7 +18,7 @@
 struct nft_rbtree {
 	struct rb_root		root;
 	rwlock_t		lock;
-	seqcount_t		count;
+	seqcount_rwlock_t	count;
 	struct delayed_work	gc_work;
 };
 
@@ -523,7 +523,7 @@ static int nft_rbtree_init(const struct nft_set *set,
 	struct nft_rbtree *priv = nft_set_priv(set);
 
 	rwlock_init(&priv->lock);
-	seqcount_init(&priv->count);
+	seqcount_rwlock_init(&priv->count, &priv->lock);
 	priv->root = RB_ROOT;
 
 	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
@@ -122,7 +122,7 @@ struct xfrm_pol_inexact_bin {
 	/* list containing '*:*' policies */
 	struct hlist_head hhead;
 
-	seqcount_t count;
+	seqcount_spinlock_t count;
 	/* tree sorted by daddr/prefix */
 	struct rb_root root_d;
 
@@ -155,7 +155,7 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
 						__read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
-static __read_mostly seqcount_t xfrm_policy_hash_generation;
+static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
 
 static struct rhashtable xfrm_policy_inexact_table;
 static const struct rhashtable_params xfrm_pol_inexact_params;
@@ -719,7 +719,7 @@ xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
 	INIT_HLIST_HEAD(&bin->hhead);
 	bin->root_d = RB_ROOT;
 	bin->root_s = RB_ROOT;
-	seqcount_init(&bin->count);
+	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
 
 	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
 						&bin->k, &bin->head,
@@ -1899,7 +1899,7 @@ static int xfrm_policy_match(const struct xfrm_policy *pol,
 
 static struct xfrm_pol_inexact_node *
 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
-				seqcount_t *count,
+				seqcount_spinlock_t *count,
 				const xfrm_address_t *addr, u16 family)
 {
 	const struct rb_node *parent;
@@ -4157,7 +4157,7 @@ void __init xfrm_init(void)
 {
 	register_pernet_subsys(&xfrm_net_ops);
 	xfrm_dev_init();
-	seqcount_init(&xfrm_policy_hash_generation);
+	seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
 	xfrm_input_init();
 
 #ifdef CONFIG_XFRM_ESPINTCP
@@ -303,7 +303,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	INIT_LIST_HEAD(&irqfd->list);
 	INIT_WORK(&irqfd->inject, irqfd_inject);
 	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
-	seqcount_init(&irqfd->irq_entry_sc);
+	seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
 
 	f = fdget(args->fd);
 	if (!f.file) {