Skip to content

Commit

Permalink
jump_label: Split out code under the hotplug lock
Browse files Browse the repository at this point in the history
In order to later introduce an "already locked" version of some
of the static key functions, let's split the code into the core stuff
(the *_cpuslocked functions) and the usual helpers, which now
take/release the hotplug lock and call into the _cpuslocked
versions.

Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Leo Yan <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
  • Loading branch information
Marc Zyngier authored and Ingo Molnar committed Aug 10, 2017
1 parent b70cecf commit 8b7b412
Showing 1 changed file with 19 additions and 9 deletions.
28 changes: 19 additions & 9 deletions kernel/jump_label.c
Original file line number Diff line number Diff line change
Expand Up @@ -79,11 +79,10 @@ int static_key_count(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc(struct static_key *key)
static void static_key_slow_inc_cpuslocked(struct static_key *key)
{
int v, v1;

cpus_read_lock();
STATIC_KEY_CHECK_USE();

/*
Expand All @@ -100,10 +99,8 @@ void static_key_slow_inc(struct static_key *key)
*/
for (v = atomic_read(&key->enabled); v > 0; v = v1) {
v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
if (likely(v1 == v)) {
cpus_read_unlock();
if (likely(v1 == v))
return;
}
}

jump_label_lock();
Expand All @@ -119,6 +116,12 @@ void static_key_slow_inc(struct static_key *key)
atomic_inc(&key->enabled);
}
jump_label_unlock();
}

/*
 * Public entry point: take the CPU hotplug read lock around the core
 * increment logic so the jump-label update cannot race with CPU
 * hotplug, then release it again.
 */
void static_key_slow_inc(struct static_key *key)
{
cpus_read_lock();
static_key_slow_inc_cpuslocked(key);
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
Expand Down Expand Up @@ -163,10 +166,10 @@ void static_key_disable(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_disable);

static void __static_key_slow_dec(struct static_key *key,
unsigned long rate_limit, struct delayed_work *work)
static void static_key_slow_dec_cpuslocked(struct static_key *key,
unsigned long rate_limit,
struct delayed_work *work)
{
cpus_read_lock();
/*
* The negative count check is valid even when a negative
* key->enabled is in use by static_key_slow_inc(); a
Expand All @@ -177,7 +180,6 @@ static void __static_key_slow_dec(struct static_key *key,
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
WARN(atomic_read(&key->enabled) < 0,
"jump label: negative count!\n");
cpus_read_unlock();
return;
}

Expand All @@ -188,6 +190,14 @@ static void __static_key_slow_dec(struct static_key *key,
jump_label_update(key);
}
jump_label_unlock();
}

/*
 * Wrapper around the core decrement logic: hold the CPU hotplug read
 * lock across static_key_slow_dec_cpuslocked() so the key update is
 * serialized against CPU hotplug operations.
 */
static void __static_key_slow_dec(struct static_key *key,
unsigned long rate_limit,
struct delayed_work *work)
{
cpus_read_lock();
static_key_slow_dec_cpuslocked(key, rate_limit, work);
cpus_read_unlock();
}

Expand Down

0 comments on commit 8b7b412

Please sign in to comment.