powerpc/hw_breakpoint: Avoid relying on caller synchronization
Internal data structures (cpu_bps, task_bps) of powerpc's hw_breakpoint
implementation have relied on nr_bp_mutex serializing access to them.

Before overhauling synchronization of kernel/events/hw_breakpoint.c,
introduce 2 spinlocks to synchronize cpu_bps and task_bps respectively,
thus avoiding reliance on callers synchronizing powerpc's hw_breakpoint.

Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Marco Elver <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Dmitry Vyukov <[email protected]>
Acked-by: Ian Rogers <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
melver authored and Peter Zijlstra committed Aug 30, 2022
1 parent 24198ad commit f95e5a3
Showing 1 changed file with 40 additions and 13 deletions.
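
Before the diff, a minimal self-contained sketch of the pattern the patch adopts for the task_bps list (illustrative only; example_lock, example_list, example_entry and the functions below are made-up names, not part of the patch): every writer and reader of the shared list takes the same spinlock, and lookups record their result and break out of the loop so a function never returns with the lock held.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical shared state, mirroring task_bps/task_bps_lock. */
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

struct example_entry {
	struct list_head list;
	int id;
};

/* Insertion: take the lock around the list update. */
static void example_add(struct example_entry *e)
{
	spin_lock(&example_lock);
	list_add(&e->list, &example_list);
	spin_unlock(&example_lock);
}

/* Lookup: remember the answer and break, then unlock and return. */
static bool example_contains(int id)
{
	struct example_entry *e;
	bool ret = false;

	spin_lock(&example_lock);
	list_for_each_entry(e, &example_list, list) {
		if (e->id == id) {
			ret = true;
			break;
		}
	}
	spin_unlock(&example_lock);
	return ret;
}

The "ret = ...; break;" shape is why the check helpers in the diff below no longer return from inside their loops: returning early would leave task_bps_lock (or cpu_bps_lock) held.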
53 changes: 40 additions & 13 deletions arch/powerpc/kernel/hw_breakpoint.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>

@@ -129,7 +130,14 @@ struct breakpoint {
 	bool ptrace_bp;
 };

+/*
+ * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
+ * rely on it safely synchronizing internals here; however, we can rely on it
+ * not requesting more breakpoints than available.
+ */
+static DEFINE_SPINLOCK(cpu_bps_lock);
 static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static DEFINE_SPINLOCK(task_bps_lock);
 static LIST_HEAD(task_bps);

 static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
@@ -174,14 +182,17 @@ static int task_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);

+	spin_lock(&task_bps_lock);
 	list_add(&tmp->list, &task_bps);
+	spin_unlock(&task_bps_lock);
 	return 0;
 }

 static void task_bps_remove(struct perf_event *bp)
 {
 	struct list_head *pos, *q;

+	spin_lock(&task_bps_lock);
 	list_for_each_safe(pos, q, &task_bps) {
 		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);

@@ -191,6 +202,7 @@ static void task_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&task_bps_lock);
 }

 /*
@@ -200,12 +212,17 @@ static void task_bps_remove(struct perf_event *bp)
 static bool all_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;

+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
-		if (!can_co_exist(tmp, bp))
-			return true;
+		if (!can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }

 /*
@@ -215,13 +232,18 @@ static bool all_task_bps_check(struct perf_event *bp)
 static bool same_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;

+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
 		if (tmp->bp->hw.target == bp->hw.target &&
-			!can_co_exist(tmp, bp))
-			return true;
+		    !can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }

 static int cpu_bps_add(struct perf_event *bp)
@@ -234,13 +256,15 @@ static int cpu_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);

+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i]) {
 			cpu_bp[i] = tmp;
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 	return 0;
 }

@@ -249,6 +273,7 @@ static void cpu_bps_remove(struct perf_event *bp)
 	struct breakpoint **cpu_bp;
 	int i = 0;

+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i])
@@ -260,19 +285,25 @@ static void cpu_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 }

 static bool cpu_bps_check(int cpu, struct perf_event *bp)
 {
 	struct breakpoint **cpu_bp;
+	bool ret = false;
 	int i;

+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
-		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
-			return true;
+		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&cpu_bps_lock);
+	return ret;
 }

 static bool all_cpu_bps_check(struct perf_event *bp)
@@ -286,10 +317,6 @@ static bool all_cpu_bps_check(struct perf_event *bp)
 	return false;
 }

-/*
- * We don't use any locks to serialize accesses to cpu_bps or task_bps
- * because are already inside nr_bp_mutex.
- */
 int arch_reserve_bp_slot(struct perf_event *bp)
 {
 	int ret;
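
The per-CPU side follows the same discipline. A rough sketch of guarding a per-CPU slot array with one global spinlock, modelled on cpu_bps_add()/cpu_bps_remove() above (the names and the fixed slot count are made up for illustration):

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

#define EXAMPLE_SLOTS 4	/* hypothetical slot count */

static DEFINE_SPINLOCK(example_cpu_lock);
static DEFINE_PER_CPU(void *, example_slots[EXAMPLE_SLOTS]);

/*
 * Claim a free slot on @cpu. Unlike the kernel code, which can rely on the
 * core hw_breakpoint layer never over-committing slots, this sketch reports
 * -ENOSPC when all slots are taken.
 */
static int example_slot_add(int cpu, void *item)
{
	void **slots;
	int i, ret = -ENOSPC;

	spin_lock(&example_cpu_lock);
	slots = per_cpu_ptr(example_slots, cpu);
	for (i = 0; i < EXAMPLE_SLOTS; i++) {
		if (!slots[i]) {
			slots[i] = item;
			ret = 0;
			break;
		}
	}
	spin_unlock(&example_cpu_lock);
	return ret;
}

A single global lock (rather than one lock per CPU) mirrors the patch's cpu_bps_lock; these slot-management paths run only when a breakpoint is created or released, so the simpler scheme is presumably preferred over a more scalable one.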
