mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-23 11:04:44 +08:00
netfilter: conntrack: Use sequence counter with associated spinlock
A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_spinlock_t data type, which allows to associate a spinlock with the sequence counter. This enables lockdep to verify that the spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20200720155530.1173732-15-a.darwish@linutronix.de
This commit is contained in:
parent
b75058614f
commit
8201d923f4
@@ -286,7 +286,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize);

 extern struct hlist_nulls_head *nf_conntrack_hash;
 extern unsigned int nf_conntrack_htable_size;
-extern seqcount_t nf_conntrack_generation;
+extern seqcount_spinlock_t nf_conntrack_generation;
 extern unsigned int nf_conntrack_max;

 /* must be called with rcu read lock held */
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

 unsigned int nf_conntrack_max __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_max);
-seqcount_t nf_conntrack_generation __read_mostly;
+seqcount_spinlock_t nf_conntrack_generation __read_mostly;
 static unsigned int nf_conntrack_hash_rnd __read_mostly;

 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
@@ -2600,7 +2600,8 @@ int nf_conntrack_init_start(void)
 	/* struct nf_ct_ext uses u8 to store offsets/size */
 	BUILD_BUG_ON(total_extension_size() > 255u);

-	seqcount_init(&nf_conntrack_generation);
+	seqcount_spinlock_init(&nf_conntrack_generation,
+			       &nf_conntrack_locks_all_lock);

 	for (i = 0; i < CONNTRACK_LOCKS; i++)
 		spin_lock_init(&nf_conntrack_locks[i]);
|
Loading…
Reference in New Issue
Block a user