locking/qspinlock_stat: Introduce generic lockevent_*() counting APIs
The per-cpu event counts used by the qspinlock code can be useful for
other locking code as well, so a new set of lockevent_*() counting APIs
is introduced, with the lock event names extracted out into the new
lock_events_list.h header file to make future additions easier.

The existing qstat_inc() calls are replaced by either lockevent_inc() or
lockevent_cond_inc() calls.

The qstat_hop() call is renamed to lockevent_pv_hop(). The "reset_counters"
debugfs file is also renamed to ".reset_counts".

Signed-off-by: Waiman Long <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Tim Chen <[email protected]>
Cc: Will Deacon <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
Waiman-Long authored and Ingo Molnar committed Apr 10, 2019
1 parent 3b4ba66 commit ad53fa1
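
The conversion is mechanical: the old qstat_inc(ev, cond) always took an explicit condition, while the new API splits the unconditional, conditional and additive cases. A minimal sketch of the mapping, reusing event names that appear in the diffs below (illustrative only, not part of the patch):

	/* old API: the second argument is the increment condition */
	qstat_inc(qstat_pv_wait_node, true);
	qstat_inc(qstat_pv_wait_early, wait_early);

	/* new API: the intent is explicit in the call */
	lockevent_inc(pv_wait_node);			/* count unconditionally   */
	lockevent_cond_inc(pv_wait_early, wait_early);	/* count only if cond true */
	lockevent_add(pv_hash_hops, hopcnt);		/* add an arbitrary amount */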
Showing 5 changed files with 181 additions and 114 deletions.
55 changes: 55 additions & 0 deletions kernel/locking/lock_events.h
@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <[email protected]>
 */

enum lock_events {

#include "lock_events_list.h"

	lockevent_num,	/* Total number of lock event counts */
	LOCKEVENT_reset_cnts = lockevent_num,
};

#ifdef CONFIG_QUEUED_LOCK_STAT
/*
 * Per-cpu counters
 */
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

/*
 * Increment the PV qspinlock statistical counters
 */
static inline void __lockevent_inc(enum lock_events event, bool cond)
{
	if (cond)
		__this_cpu_inc(lockevents[event]);
}

#define lockevent_inc(ev)	  __lockevent_inc(LOCKEVENT_ ##ev, true)
#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)

static inline void __lockevent_add(enum lock_events event, int inc)
{
	__this_cpu_add(lockevents[event], inc);
}

#define lockevent_add(ev, c)	__lockevent_add(LOCKEVENT_ ##ev, c)

#else /* CONFIG_QUEUED_LOCK_STAT */

#define lockevent_inc(ev)
#define lockevent_add(ev, c)
#define lockevent_cond_inc(ev, c)

#endif /* CONFIG_QUEUED_LOCK_STAT */
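
Including lock_events_list.h inside the enum is the classic X-macro idiom: with the default LOCK_EVENT() definition, every LOCK_EVENT(name) line in the list expands to an enumerator LOCKEVENT_name, so the enum stays in sync with the list file automatically. Roughly, the preprocessor produces (a sketch, with most enumerators elided):

	enum lock_events {
		LOCKEVENT_pv_hash_hops,
		LOCKEVENT_pv_kick_unlock,
		/* ... one enumerator per LOCK_EVENT() line ... */
		LOCKEVENT_lock_no_node,
		lockevent_num,		/* == total number of events */
		LOCKEVENT_reset_cnts = lockevent_num,
	};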
50 changes: 50 additions & 0 deletions kernel/locking/lock_events_list.h
@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <[email protected]>
 */

#ifndef LOCK_EVENT
#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
#endif

#ifdef CONFIG_QUEUED_SPINLOCKS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Locking events for PV qspinlock.
 */
LOCK_EVENT(pv_hash_hops)	/* Average # of hops per hashing operation */
LOCK_EVENT(pv_kick_unlock)	/* # of vCPU kicks issued at unlock time   */
LOCK_EVENT(pv_kick_wake)	/* # of vCPU kicks for pv_latency_wake     */
LOCK_EVENT(pv_latency_kick)	/* Average latency (ns) of vCPU kick       */
LOCK_EVENT(pv_latency_wake)	/* Average latency (ns) of kick-to-wakeup  */
LOCK_EVENT(pv_lock_stealing)	/* # of lock stealing operations           */
LOCK_EVENT(pv_spurious_wakeup)	/* # of spurious wakeups in non-head vCPUs */
LOCK_EVENT(pv_wait_again)	/* # of wait's after queue head vCPU kick  */
LOCK_EVENT(pv_wait_early)	/* # of early vCPU wait's                  */
LOCK_EVENT(pv_wait_head)	/* # of vCPU wait's at the queue head      */
LOCK_EVENT(pv_wait_node)	/* # of vCPU wait's at non-head queue node */
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

/*
 * Locking events for qspinlock
 *
 * Subtracting lock_use_node[234] from lock_slowpath will give you
 * lock_use_node1.
 */
LOCK_EVENT(lock_pending)	/* # of locking ops via pending code          */
LOCK_EVENT(lock_slowpath)	/* # of locking ops via MCS lock queue        */
LOCK_EVENT(lock_use_node2)	/* # of locking ops that use 2nd percpu node  */
LOCK_EVENT(lock_use_node3)	/* # of locking ops that use 3rd percpu node  */
LOCK_EVENT(lock_use_node4)	/* # of locking ops that use 4th percpu node  */
LOCK_EVENT(lock_no_node)	/* # of locking ops w/o using percpu node     */
#endif /* CONFIG_QUEUED_SPINLOCKS */
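
Because the default LOCK_EVENT() definition is guarded by #ifndef, other translation units can re-include this list with their own definition. A hedged sketch of how the stat-reporting side can derive a parallel table of counter names from the same list (the actual table lives in the qspinlock stat code, not in this excerpt):

	#undef  LOCK_EVENT
	#define LOCK_EVENT(name)	[LOCKEVENT_ ## name] = #name,

	static const char * const lockevent_names[lockevent_num] = {
	#include "lock_events_list.h"
	};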
8 changes: 4 additions & 4 deletions kernel/locking/qspinlock.c
@@ -395,15 +395,15 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * 0,1,0 -> 0,0,1
 	 */
 	clear_pending_set_locked(lock);
-	qstat_inc(qstat_lock_pending, true);
+	lockevent_inc(lock_pending);
 	return;
 
 	/*
 	 * End of pending bit optimistic spinning and beginning of MCS
 	 * queuing.
 	 */
 queue:
-	qstat_inc(qstat_lock_slowpath, true);
+	lockevent_inc(lock_slowpath);
 pv_queue:
 	node = this_cpu_ptr(&qnodes[0].mcs);
 	idx = node->count++;
@@ -419,7 +419,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * simple enough.
 	 */
 	if (unlikely(idx >= MAX_NODES)) {
-		qstat_inc(qstat_lock_no_node, true);
+		lockevent_inc(lock_no_node);
 		while (!queued_spin_trylock(lock))
 			cpu_relax();
 		goto release;
@@ -430,7 +430,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	/*
 	 * Keep counts of non-zero index values:
 	 */
-	qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
+	lockevent_cond_inc(lock_use_node2 + idx - 1, idx);
 
 	/*
 	 * Ensure that we increment the head node->count before initialising
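
The lock_use_node2 + idx - 1 arithmetic above works because the lock_use_node[234] events are declared consecutively in lock_events_list.h, and idx doubles as the condition: idx == 0 (the common first-node case) is deliberately not counted. After macro expansion the call is roughly:

	/* sketch of the expansion, for idx in 0..3 */
	if (idx)
		__this_cpu_inc(lockevents[LOCKEVENT_lock_use_node2 + idx - 1]);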
19 changes: 10 additions & 9 deletions kernel/locking/qspinlock_paravirt.h
@@ -89,7 +89,7 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 
 	if (!(val & _Q_LOCKED_PENDING_MASK) &&
 	    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
-		qstat_inc(qstat_pv_lock_stealing, true);
+		lockevent_inc(pv_lock_stealing);
 		return true;
 	}
 	if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
@@ -219,7 +219,7 @@ static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
 		hopcnt++;
 		if (!cmpxchg(&he->lock, NULL, lock)) {
 			WRITE_ONCE(he->node, node);
-			qstat_hop(hopcnt);
+			lockevent_pv_hop(hopcnt);
 			return &he->lock;
 		}
 	}
@@ -320,8 +320,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 		smp_store_mb(pn->state, vcpu_halted);
 
 		if (!READ_ONCE(node->locked)) {
-			qstat_inc(qstat_pv_wait_node, true);
-			qstat_inc(qstat_pv_wait_early, wait_early);
+			lockevent_inc(pv_wait_node);
+			lockevent_cond_inc(pv_wait_early, wait_early);
 			pv_wait(&pn->state, vcpu_halted);
 		}
 
@@ -339,7 +339,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 		 * So it is better to spin for a while in the hope that the
 		 * MCS lock will be released soon.
 		 */
-		qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
+		lockevent_cond_inc(pv_spurious_wakeup,
+				   !READ_ONCE(node->locked));
 	}
 
 	/*
@@ -416,7 +417,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 	/*
 	 * Tracking # of slowpath locking operations
 	 */
-	qstat_inc(qstat_lock_slowpath, true);
+	lockevent_inc(lock_slowpath);
 
 	for (;; waitcnt++) {
 		/*
@@ -464,8 +465,8 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 			}
 		}
 		WRITE_ONCE(pn->state, vcpu_hashed);
-		qstat_inc(qstat_pv_wait_head, true);
-		qstat_inc(qstat_pv_wait_again, waitcnt);
+		lockevent_inc(pv_wait_head);
+		lockevent_cond_inc(pv_wait_again, waitcnt);
 		pv_wait(&lock->locked, _Q_SLOW_VAL);
 
 		/*
@@ -528,7 +529,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 	 * vCPU is harmless other than the additional latency in completing
 	 * the unlock.
 	 */
-	qstat_inc(qstat_pv_kick_unlock, true);
+	lockevent_inc(pv_kick_unlock);
 	pv_kick(node->cpu);
 }
 
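
lockevent_pv_hop() itself is defined in the qspinlock stat code rather than in the hunks above; it folds the per-operation hash-hop count into the pv_hash_hops counter. A hedged sketch in terms of the generic helper (assumed form; the real definition may differ):

	/* sketch only: the actual definition lives in the stat header */
	#define lockevent_pv_hop(hopcnt)	lockevent_add(pv_hash_hops, hopcnt)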
163 changes: 62 additions & 101 deletions kernel/locking/qspinlock_stat.h (diff not loaded)
