perf/hw_breakpoint: Optimize max_bp_pinned_slots() for CPU-independent task targets

Running the perf benchmark with (note: more aggressive parameters vs. the
preceding changes, but on the same 256-CPU host):

 | $> perf bench -r 100 breakpoint thread -b 4 -p 128 -t 512
 | # Running 'breakpoint/thread' benchmark:
 | # Created/joined 100 threads with 4 breakpoints and 128 parallelism
 |      Total time: 1.989 [sec]
 |
 |       38.854160 usecs/op
 |     4973.332500 usecs/op/cpu

    20.43%  [kernel]       [k] queued_spin_lock_slowpath
    18.75%  [kernel]       [k] osq_lock
    16.98%  [kernel]       [k] rhashtable_jhash2
     8.34%  [kernel]       [k] task_bp_pinned
     4.23%  [kernel]       [k] smp_cfm_core_cond
     3.65%  [kernel]       [k] bcmp
     2.83%  [kernel]       [k] toggle_bp_slot
     1.87%  [kernel]       [k] find_next_bit
     1.49%  [kernel]       [k] __reserve_bp_slot

We can see that a majority of the time is now spent hashing task
pointers to index into task_bps_ht in task_bp_pinned().

Computing max_bp_pinned_slots() for CPU-independent task targets is
currently O(#cpus): it calls task_bp_pinned() once per CPU, even when the
result of task_bp_pinned() is CPU-independent.

The loop in max_bp_pinned_slots() computes the maximum number of used
slots across all CPUs. If task_bp_pinned() is CPU-independent, we can
obtain this maximum by taking the maximum number of CPU-pinned slots
across all CPUs and adding task_bp_pinned().

To do so in O(1), use a bp_slots_histogram for CPU-pinned slots.
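(The bp_slots_histogram type and its helpers are introduced by an earlier
patch in this series and are not part of this diff. As a rough illustration
of the idea only, here is a minimal user-space sketch: count[i] tracks how
many CPUs currently use exactly i+1 CPU-pinned slots, so the maximum across
all CPUs is the highest non-empty bucket. The names mirror the kernel's
bp_slots_histogram_add()/bp_slots_histogram_max(), but MAX_SLOTS, the
simplified signatures, and the plain int counters are assumptions for
illustration; the kernel version uses atomic_t counters and per-type slot
counts.)

#include <assert.h>

#define MAX_SLOTS 4     /* assumed number of breakpoint slots per CPU */

/* count[i]: number of CPUs currently using exactly i+1 CPU-pinned slots. */
struct bp_slots_histogram {
        int count[MAX_SLOTS];
};

/* A CPU's pinned-slot count changes from @old to @old + @val. */
static void bp_slots_histogram_add(struct bp_slots_histogram *hist, int old, int val)
{
        if (old > 0)
                hist->count[old - 1]--;
        if (old + val > 0)
                hist->count[old + val - 1]++;
}

/* Maximum CPU-pinned slot count across all CPUs: highest non-empty bucket. */
static int bp_slots_histogram_max(struct bp_slots_histogram *hist)
{
        for (int i = MAX_SLOTS - 1; i >= 0; i--) {
                if (hist->count[i] > 0)
                        return i + 1;
        }
        return 0;
}

int main(void)
{
        struct bp_slots_histogram hist = { { 0 } };

        bp_slots_histogram_add(&hist, 0, 1);    /* CPU A: 0 -> 1 pinned slots */
        bp_slots_histogram_add(&hist, 0, 2);    /* CPU B: 0 -> 2 pinned slots */
        assert(bp_slots_histogram_max(&hist) == 2);

        bp_slots_histogram_add(&hist, 2, -2);   /* CPU B: 2 -> 0 pinned slots */
        assert(bp_slots_histogram_max(&hist) == 1);

        return 0;
}

In the diff below, toggle_bp_slot() keeps such a histogram (cpu_pinned[])
in sync whenever a CPU's cpu_pinned count changes, so max_bp_pinned_slots()
can read the maximum without iterating over CPUs.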

After this optimization:

 | $> perf bench -r 100 breakpoint thread -b 4 -p 128 -t 512
 | # Running 'breakpoint/thread' benchmark:
 | # Created/joined 100 threads with 4 breakpoints and 128 parallelism
 |      Total time: 1.930 [sec]
 |
 |       37.697832 usecs/op
 |     4825.322500 usecs/op/cpu

    19.13%  [kernel]       [k] queued_spin_lock_slowpath
    18.21%  [kernel]       [k] rhashtable_jhash2
    15.46%  [kernel]       [k] osq_lock
     6.27%  [kernel]       [k] toggle_bp_slot
     5.91%  [kernel]       [k] task_bp_pinned
     5.05%  [kernel]       [k] smp_cfm_core_cond
     1.78%  [kernel]       [k] update_sg_lb_stats
     1.36%  [kernel]       [k] llist_reverse_order
     1.34%  [kernel]       [k] find_next_bit
     1.19%  [kernel]       [k] bcmp

This suggests that the time spent in task_bp_pinned() has been reduced.
However, we're still hashing too much; that will be addressed in the
subsequent change.

Signed-off-by: Marco Elver <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Dmitry Vyukov <[email protected]>
Acked-by: Ian Rogers <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
melver authored and Peter Zijlstra committed Aug 30, 2022
1 parent 16db283 commit 9b1933b
kernel/events/hw_breakpoint.c | 57 changes (53 additions, 4 deletions)
@@ -64,6 +64,9 @@ static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
         return per_cpu_ptr(bp_cpuinfo + type, cpu);
 }
 
+/* Number of pinned CPU breakpoints globally. */
+static struct bp_slots_histogram cpu_pinned[TYPE_MAX];
+
 /* Keep track of the breakpoints attached to tasks */
 static struct rhltable task_bps_ht;
 static const struct rhashtable_params task_bps_ht_params = {
@@ -194,6 +197,10 @@ static __init int init_breakpoint_slots(void)
                                 goto err;
                 }
         }
+        for (i = 0; i < TYPE_MAX; i++) {
+                if (!bp_slots_histogram_alloc(&cpu_pinned[i], i))
+                        goto err;
+        }
 
         return 0;
 err:
@@ -203,6 +210,8 @@ static __init int init_breakpoint_slots(void)
                 if (err_cpu == cpu)
                         break;
         }
+        for (i = 0; i < TYPE_MAX; i++)
+                bp_slots_histogram_free(&cpu_pinned[i]);
 
         return -ENOMEM;
 }
@@ -270,6 +279,9 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 /*
  * Count the number of breakpoints of the same type and same task.
  * The given event must be not on the list.
+ *
+ * If @cpu is -1, but the result of task_bp_pinned() is not CPU-independent,
+ * returns a negative value.
  */
 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
@@ -288,9 +300,18 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
                 goto out;
 
         rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list) {
-                if (find_slot_idx(iter->attr.bp_type) == type &&
-                    (iter->cpu < 0 || cpu == iter->cpu))
-                        count += hw_breakpoint_weight(iter);
+                if (find_slot_idx(iter->attr.bp_type) != type)
+                        continue;
+
+                if (iter->cpu >= 0) {
+                        if (cpu == -1) {
+                                count = -1;
+                                goto out;
+                        } else if (cpu != iter->cpu)
+                                continue;
+                }
+
+                count += hw_breakpoint_weight(iter);
         }
 
 out:
@@ -316,6 +337,19 @@ max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
         int pinned_slots = 0;
         int cpu;
 
+        if (bp->hw.target && bp->cpu < 0) {
+                int max_pinned = task_bp_pinned(-1, bp, type);
+
+                if (max_pinned >= 0) {
+                        /*
+                         * Fast path: task_bp_pinned() is CPU-independent and
+                         * returns the same value for any CPU.
+                         */
+                        max_pinned += bp_slots_histogram_max(&cpu_pinned[type], type);
+                        return max_pinned;
+                }
+        }
+
         for_each_cpu(cpu, cpumask) {
                 struct bp_cpuinfo *info = get_bp_info(cpu, type);
                 int nr;
@@ -366,8 +400,11 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 
         /* Pinned counter cpu profiling */
         if (!bp->hw.target) {
+                struct bp_cpuinfo *info = get_bp_info(bp->cpu, type);
+
                 lockdep_assert_held_write(&bp_cpuinfo_sem);
-                get_bp_info(bp->cpu, type)->cpu_pinned += weight;
+                bp_slots_histogram_add(&cpu_pinned[type], info->cpu_pinned, weight);
+                info->cpu_pinned += weight;
                 return 0;
         }
 
@@ -804,6 +841,18 @@ bool hw_breakpoint_is_used(void)
                 }
         }
 
+        for (int type = 0; type < TYPE_MAX; ++type) {
+                for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
+                        /*
+                         * Warn, because if there are CPU pinned counters,
+                         * should never get here; bp_cpuinfo::cpu_pinned should
+                         * be consistent with the global cpu_pinned histogram.
+                         */
+                        if (WARN_ON(atomic_read(&cpu_pinned[type].count[slot])))
+                                return true;
+                }
+        }
+
         return false;
 }
 
