locking/static_key: Don't take sleeping locks in __static_key_slow_dec_deferred()

Changing jump_label state is protected by jump_label_lock().
A rate-limited static_key_slow_dec(), however, never calls
jump_label_update() directly; it schedules delayed work instead.
Therefore it is unnecessary to take both cpus_read_lock() and
jump_label_lock().

This allows static_key_slow_dec_deferred() to be called from atomic
contexts, such as socket destruction in net/tls, without the need for
another indirection.
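
As an illustration (not part of this commit), a minimal sketch of the
pattern this enables; the key and function names below are hypothetical,
loosely modeled on users such as net/tls:

#include <linux/jump_label_ratelimit.h>

/* Hypothetical rate-limited static key. */
static struct static_key_deferred my_feature_key;

static void my_feature_setup(void)
{
	/* Batch disable events: defer the code patching by at least 1s. */
	jump_label_rate_limit(&my_feature_key, HZ);
}

static void my_obj_create(void)
{
	/* May sleep: takes jump_label_lock() and may patch code. */
	static_key_slow_inc(&my_feature_key.key);
}

static void my_obj_destroy(void)
{
	/*
	 * Safe in atomic context after this change: only a lockless
	 * atomic decrement is attempted, and if the key would reach
	 * zero, delayed work is scheduled to do the real update.
	 */
	static_key_slow_dec_deferred(&my_feature_key);
}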

Signed-off-by: Jakub Kicinski <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Simon Horman <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
Jakub Kicinski authored and Ingo Molnar committed Apr 29, 2019
1 parent b92e793 commit 94b5f31
Showing 1 changed file with 13 additions and 19 deletions.
32 changes: 13 additions & 19 deletions kernel/jump_label.c
@@ -221,63 +221,57 @@ static bool static_key_slow_try_dec(struct static_key *key)
 	return true;
 }
 
-static void __static_key_slow_dec_cpuslocked(struct static_key *key,
-					     unsigned long rate_limit,
-					     struct delayed_work *work)
+static void __static_key_slow_dec_cpuslocked(struct static_key *key)
 {
 	lockdep_assert_cpus_held();
 
 	if (static_key_slow_try_dec(key))
 		return;
 
 	jump_label_lock();
-	if (atomic_dec_and_test(&key->enabled)) {
-		if (rate_limit) {
-			atomic_inc(&key->enabled);
-			schedule_delayed_work(work, rate_limit);
-		} else {
-			jump_label_update(key);
-		}
-	}
+	if (atomic_dec_and_test(&key->enabled))
+		jump_label_update(key);
 	jump_label_unlock();
 }
 
-static void __static_key_slow_dec(struct static_key *key,
-				  unsigned long rate_limit,
-				  struct delayed_work *work)
+static void __static_key_slow_dec(struct static_key *key)
 {
 	cpus_read_lock();
-	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
+	__static_key_slow_dec_cpuslocked(key);
 	cpus_read_unlock();
 }
 
 void jump_label_update_timeout(struct work_struct *work)
 {
 	struct static_key_deferred *key =
 		container_of(work, struct static_key_deferred, work.work);
-	__static_key_slow_dec(&key->key, 0, NULL);
+	__static_key_slow_dec(&key->key);
 }
 EXPORT_SYMBOL_GPL(jump_label_update_timeout);
 
 void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE(key);
-	__static_key_slow_dec(key, 0, NULL);
+	__static_key_slow_dec(key);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
 void static_key_slow_dec_cpuslocked(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE(key);
-	__static_key_slow_dec_cpuslocked(key, 0, NULL);
+	__static_key_slow_dec_cpuslocked(key);
 }
 
 void __static_key_slow_dec_deferred(struct static_key *key,
 				    struct delayed_work *work,
 				    unsigned long timeout)
 {
 	STATIC_KEY_CHECK_USE(key);
-	__static_key_slow_dec(key, timeout, work);
+
+	if (static_key_slow_try_dec(key))
+		return;
+
+	schedule_delayed_work(work, timeout);
 }
 EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
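
For context (not part of this diff), callers normally reach
__static_key_slow_dec_deferred() through a thin wrapper in
<linux/jump_label_ratelimit.h> that forwards the fields of
struct static_key_deferred; roughly as sketched below (field names
assumed from the container_of() use above):

static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	/* Forward the embedded key, work item and configured timeout. */
	__static_key_slow_dec_deferred(&key->key, &key->work, key->timeout);
}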

