locking/static_key: Don't take sleeping locks in __static_key_slow_dec_deferred()
Changing jump_label state is protected by jump_label_lock(). A rate
limited static_key_slow_dec(), however, will never directly call
jump_label_update(); it will schedule a delayed work instead. Therefore
it's unnecessary to take both cpus_read_lock() and jump_label_lock().

This allows static_key_slow_dec_deferred() to be called from atomic
contexts, like socket destruction in net/tls, without the need for
another indirection.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: alexei.starovoitov@gmail.com
Cc: ard.biesheuvel@linaro.org
Cc: oss-drivers@netronome.com
Cc: yamada.masahiro@socionext.com
Link: https://lkml.kernel.org/r/20190330000854.30142-4-jakub.kicinski@netronome.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 94b5f312cf
parent b92e793bbe
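For context, the caller-side pattern this change makes legal looks roughly like the sketch below. This is not code from the patch: the tls_needed key and the tls_sock_*() callbacks are hypothetical stand-ins for the net/tls usage, while struct static_key_deferred, jump_label_rate_limit(), static_key_slow_inc() and static_key_slow_dec_deferred() are the existing rate-limited jump label API from <linux/jump_label_ratelimit.h>.

#include <linux/init.h>
#include <linux/jump_label_ratelimit.h>
#include <net/sock.h>

/* Hypothetical deferred key, standing in for what net/tls needs. */
static struct static_key_deferred tls_needed __read_mostly;

static int __init tls_key_setup(void)
{
	/* Rate limit disables: coalesce them into one update per second. */
	jump_label_rate_limit(&tls_needed, HZ);
	return 0;
}
core_initcall(tls_key_setup);

/* Process context: taking the sleeping locks here is fine. */
static void tls_sock_init(struct sock *sk)
{
	static_key_slow_inc(&tls_needed.key);
}

/*
 * Atomic context, e.g. a softirq destroying the socket. After this
 * patch the deferred dec either succeeds with a plain atomic_dec()
 * or schedules the delayed work; it never takes cpus_read_lock() or
 * jump_label_lock() itself, so calling it here is safe.
 */
static void tls_sock_destruct(struct sock *sk)
{
	static_key_slow_dec_deferred(&tls_needed);
}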
kernel/jump_label.c
@@ -221,9 +221,7 @@ static bool static_key_slow_try_dec(struct static_key *key)
 	return true;
 }
 
-static void __static_key_slow_dec_cpuslocked(struct static_key *key,
-					     unsigned long rate_limit,
-					     struct delayed_work *work)
+static void __static_key_slow_dec_cpuslocked(struct static_key *key)
 {
 	lockdep_assert_cpus_held();
 
@@ -231,23 +229,15 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
 		return;
 
 	jump_label_lock();
-	if (atomic_dec_and_test(&key->enabled)) {
-		if (rate_limit) {
-			atomic_inc(&key->enabled);
-			schedule_delayed_work(work, rate_limit);
-		} else {
-			jump_label_update(key);
-		}
-	}
+	if (atomic_dec_and_test(&key->enabled))
+		jump_label_update(key);
 	jump_label_unlock();
 }
 
-static void __static_key_slow_dec(struct static_key *key,
-				  unsigned long rate_limit,
-				  struct delayed_work *work)
+static void __static_key_slow_dec(struct static_key *key)
 {
 	cpus_read_lock();
-	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
+	__static_key_slow_dec_cpuslocked(key);
 	cpus_read_unlock();
 }
 
@@ -255,21 +245,21 @@ void jump_label_update_timeout(struct work_struct *work)
 {
 	struct static_key_deferred *key =
 		container_of(work, struct static_key_deferred, work.work);
-	__static_key_slow_dec(&key->key, 0, NULL);
+	__static_key_slow_dec(&key->key);
 }
 EXPORT_SYMBOL_GPL(jump_label_update_timeout);
 
 void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE(key);
-	__static_key_slow_dec(key, 0, NULL);
+	__static_key_slow_dec(key);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
 void static_key_slow_dec_cpuslocked(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE(key);
-	__static_key_slow_dec_cpuslocked(key, 0, NULL);
+	__static_key_slow_dec_cpuslocked(key);
 }
 
 void __static_key_slow_dec_deferred(struct static_key *key,
@@ -277,7 +267,11 @@ void __static_key_slow_dec_deferred(struct static_key *key,
 			  unsigned long timeout)
 {
 	STATIC_KEY_CHECK_USE(key);
-	__static_key_slow_dec(key, timeout, work);
+
+	if (static_key_slow_try_dec(key))
+		return;
+
+	schedule_delayed_work(work, timeout);
 }
 EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
 