perf/x86: Implement immediate enforcement of /sys/devices/cpu/rdpmc value of 0

According to the documentation, when 0 is successfully written to
/sys/devices/cpu/rdpmc, the RDPMC instruction should be disabled
unconditionally and immediately (as soon as the sysfs file is closed).

In the current implementation, however, the PMU first has to be reloaded,
which only happens at some point in the future, and only then does the
RDPMC instruction become disabled (in ring 3) on the respective core.

This change makes the handling of the 0 value as immediate and as
unconditional as the current handling of the 2 value; the only difference
is that the CR4.PCE bit is naturally cleared instead of set.
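
For illustration, the intended behavior can be checked from userspace with
a sketch along these lines (not part of this patch; it assumes root
privileges on an x86 machine, and the file name rdpmc_test.c is made up
for the example):

/* rdpmc_test.c - hypothetical demo: after writing 0 to the sysfs file,
 * executing RDPMC in ring 3 should fault immediately (#GP -> SIGSEGV).
 * Build with: gcc -o rdpmc_test rdpmc_test.c
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/wait.h>

static void try_rdpmc(void)
{
	unsigned int lo, hi;

	/* RDPMC with ECX = 0; faults when CR4.PCE is clear */
	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (0));
	printf("rdpmc succeeded: %#x:%#x\n", hi, lo);
}

int main(void)
{
	int fd = open("/sys/devices/cpu/rdpmc", O_WRONLY);

	if (fd < 0 || write(fd, "0", 1) != 1) {
		perror("/sys/devices/cpu/rdpmc");
		return 1;
	}
	close(fd);

	if (fork() == 0) {
		try_rdpmc();		/* expected to die with SIGSEGV */
		_exit(0);
	}

	int status;
	wait(&status);
	if (WIFSIGNALED(status))
		printf("child killed by signal %d: RDPMC is disabled\n",
		       WTERMSIG(status));
	else
		printf("child survived: RDPMC is still enabled\n");
	return 0;
}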

Signed-off-by: Anthony Steinhauser <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
asteinha authored and Ingo Molnar committed Nov 27, 2019
1 parent 89d57dd commit 405b453
Showing 2 changed files with 15 additions and 7 deletions.
18 changes: 12 additions & 6 deletions arch/x86/events/core.c
@@ -49,6 +49,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
 };
 
+DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
 DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
 
 u64 __read_mostly hw_cache_event_ids
@@ -2181,21 +2182,26 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	if (x86_pmu.attr_rdpmc_broken)
 		return -ENOTSUPP;
 
-	if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
+	if (val != x86_pmu.attr_rdpmc) {
 		/*
-		 * Changing into or out of always available, aka
-		 * perf-event-bypassing mode. This path is extremely slow,
+		 * Changing into or out of never available or always available,
+		 * aka perf-event-bypassing mode. This path is extremely slow,
 		 * but only root can trigger it, so it's okay.
 		 */
+		if (val == 0)
+			static_branch_inc(&rdpmc_never_available_key);
+		else if (x86_pmu.attr_rdpmc == 0)
+			static_branch_dec(&rdpmc_never_available_key);
+
 		if (val == 2)
 			static_branch_inc(&rdpmc_always_available_key);
-		else
+		else if (x86_pmu.attr_rdpmc == 2)
 			static_branch_dec(&rdpmc_always_available_key);
+
 		on_each_cpu(refresh_pce, NULL, 1);
+		x86_pmu.attr_rdpmc = val;
 	}
 
-	x86_pmu.attr_rdpmc = val;
-
 	return count;
 }
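
The inc/dec pairs above have to stay balanced because the static keys are
reference counted: every transition of attr_rdpmc into a mode bumps the
corresponding key, and every transition out drops it. A minimal userspace
model of that bookkeeping (plain counters stand in for the kernel's static
keys; update_rdpmc() is a made-up name, not a kernel function):

#include <assert.h>

static int never_key;		/* models rdpmc_never_available_key  */
static int always_key;		/* models rdpmc_always_available_key */
static int attr_rdpmc = 1;	/* default mode */

static void update_rdpmc(int val)
{
	if (val == attr_rdpmc)
		return;		/* no transition, no key changes */

	if (val == 0)
		never_key++;	/* entering never-available mode  */
	else if (attr_rdpmc == 0)
		never_key--;	/* leaving never-available mode   */

	if (val == 2)
		always_key++;	/* entering always-available mode */
	else if (attr_rdpmc == 2)
		always_key--;	/* leaving always-available mode  */

	attr_rdpmc = val;
}

int main(void)
{
	/* Walk through every mode transition; each counter is 1 exactly
	 * while its mode is active and 0 otherwise, i.e. the keys stay
	 * balanced. */
	int seq[] = { 0, 2, 1, 0, 1, 2, 0, 1 };

	for (unsigned int i = 0; i < sizeof(seq) / sizeof(seq[0]); i++) {
		update_rdpmc(seq[i]);
		assert(never_key == (attr_rdpmc == 0));
		assert(always_key == (attr_rdpmc == 2));
	}
	return 0;
}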

4 changes: 3 additions & 1 deletion arch/x86/include/asm/mmu_context.h
@@ -26,12 +26,14 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 
 #ifdef CONFIG_PERF_EVENTS
 
+DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
 DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
 
 static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
 {
 	if (static_branch_unlikely(&rdpmc_always_available_key) ||
-	    atomic_read(&mm->context.perf_rdpmc_allowed))
+	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
+	     atomic_read(&mm->context.perf_rdpmc_allowed)))
 		cr4_set_bits_irqsoff(X86_CR4_PCE);
 	else
 		cr4_clear_bits_irqsoff(X86_CR4_PCE);
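
The new condition reads: CR4.PCE is set when RDPMC is always available, or
when it is not never-available and the current mm has an active perf
self-monitoring mapping. A standalone sketch of that predicate and its
truth table (plain bools stand in for the static keys and the atomic
counter; pce_enabled() is a made-up helper, not kernel code):

#include <assert.h>
#include <stdbool.h>

/* Models the new condition in load_mm_cr4_irqsoff(). */
static bool pce_enabled(bool always, bool never, bool task_allowed)
{
	return always || (!never && task_allowed);
}

int main(void)
{
	/* rdpmc == 2: always on, even without a mapped perf event */
	assert(pce_enabled(true, false, false));
	/* rdpmc == 0: always off, even if the task mapped a counter */
	assert(!pce_enabled(false, true, true));
	/* rdpmc == 1: on only for tasks with a mapped perf event */
	assert(pce_enabled(false, false, true));
	assert(!pce_enabled(false, false, false));
	return 0;
}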
