Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/linux-2.6-kgdb

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/linux-2.6-kgdb:
  kdb,debug_core: adjust master cpu switch logic against new debug_core locking
  debug_core: refactor locking for master/slave cpus
  x86,kgdb: remove unnecessary call to kgdb_correct_hw_break()
  debug_core: disable hw_breakpoints on all cores in kgdb_cpu_enter()
  kdb,kgdb: fix sparse fixups
  kdb: Fix oops in kdb_unregister
  kdb,ftdump: Remove reference to internal kdb include
  kdb: Allow kernel loadable modules to add kdb shell functions
  debug_core: stop rcu warnings on kernel resume
  debug_core: move all watch dog syncs to a single function
  x86,kgdb: fix debugger hw breakpoint test regression in 2.6.35
torvalds committed Oct 23, 2010
2 parents 5cc1035 + 495363d commit 8814011
Showing 10 changed files with 144 additions and 132 deletions.
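The core of this merge is the debug_core locking refactor listed above: the per-cpu passive_cpu_wait[]/cpu_in_kgdb[] atomics are replaced by two raw spinlocks (dbg_master_lock, dbg_slave_lock) plus masters_in_kgdb/slaves_in_kgdb counters. As a reading aid for the kgdb_cpu_enter() hunks below, here is a self-contained userspace sketch of that rendezvous, with pthread spinlocks and C11 atomics standing in for the kernel primitives and a round_up flag standing in for the NMI roundup; the program and all its names are illustrative, not kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

/* Stand-ins for dbg_master_lock / dbg_slave_lock in the diff below. */
static pthread_spinlock_t master_lock;
static pthread_spinlock_t slave_lock;
/* Stand-ins for masters_in_kgdb / slaves_in_kgdb. */
static atomic_int masters_in = 0;
static atomic_int slaves_in = 0;
/* Stand-in for the roundup NMI: set only after the slave lock is held. */
static atomic_int round_up = 0;

static void *enter_debugger(void *arg)
{
	long id = (long)arg;

	if (pthread_spin_trylock(&master_lock) == 0) {
		/* Master path: park everyone else behind the slave lock. */
		atomic_fetch_add(&masters_in, 1);
		pthread_spin_lock(&slave_lock);
		atomic_store(&round_up, 1);

		/* Wait until every thread has checked in (master + slaves). */
		while (atomic_load(&masters_in) + atomic_load(&slaves_in) != NTHREADS)
			;
		printf("thread %ld is master, %d slaves parked\n",
		       id, atomic_load(&slaves_in));

		/* Release the slaves, then wait for them to drain out. */
		pthread_spin_unlock(&slave_lock);
		while (atomic_load(&slaves_in) != 0)
			;

		atomic_fetch_sub(&masters_in, 1);
		pthread_spin_unlock(&master_lock);
	} else {
		/* Slave path: only entered once the master holds the slave lock. */
		while (!atomic_load(&round_up))
			;
		atomic_fetch_add(&slaves_in, 1);
		/* The kernel probes raw_spin_is_locked(&dbg_slave_lock); userspace
		 * has no is-locked probe, so trylock and release instead. */
		while (pthread_spin_trylock(&slave_lock) != 0)
			;
		pthread_spin_unlock(&slave_lock);
		atomic_fetch_sub(&slaves_in, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	long i;

	pthread_spin_init(&master_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&slave_lock, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, enter_debugger, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Build with cc -pthread. The kernel version differs in that slaves are rounded up by NMI, may be handed the DCPU_NEXT_MASTER role, and funnel their watchdog/RCU touch-ups through the new dbg_touch_watchdogs() helper; the sketch omits those details.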
11 changes: 7 additions & 4 deletions arch/x86/kernel/kgdb.c
@@ -477,8 +477,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
raw_smp_processor_id());
}

kgdb_correct_hw_break();

return 0;
}

@@ -621,7 +619,12 @@ int kgdb_arch_init(void)
static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
struct perf_sample_data *data, struct pt_regs *regs)
{
kgdb_ll_trap(DIE_DEBUG, "debug", regs, 0, 0, SIGTRAP);
struct task_struct *tsk = current;
int i;

for (i = 0; i < 4; i++)
if (breakinfo[i].enabled)
tsk->thread.debugreg6 |= (DR_TRAP0 << i);
}

void kgdb_arch_late(void)
@@ -644,7 +647,7 @@ void kgdb_arch_late(void)
if (breakinfo[i].pev)
continue;
breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
if (IS_ERR(breakinfo[i].pev)) {
if (IS_ERR((void * __force)breakinfo[i].pev)) {
printk(KERN_ERR "kgdb: Could not allocate hw"
"breakpoints\nDisabling the kernel debugger\n");
breakinfo[i].pev = NULL;
2 changes: 1 addition & 1 deletion drivers/serial/kgdboc.c
@@ -243,7 +243,7 @@ static struct kgdb_io kgdboc_io_ops = {

#ifdef CONFIG_KGDB_SERIAL_CONSOLE
/* This is only available if kgdboc is a built in for early debugging */
int __init kgdboc_early_init(char *opt)
static int __init kgdboc_early_init(char *opt)
{
/* save the first character of the config string because the
* init routine can destroy it.
51 changes: 51 additions & 0 deletions include/linux/kdb.h
@@ -28,6 +28,41 @@ extern int kdb_poll_idx;
extern int kdb_initial_cpu;
extern atomic_t kdb_event;

/* Types and messages used for dynamically added kdb shell commands */

#define KDB_MAXARGS 16 /* Maximum number of arguments to a function */

typedef enum {
KDB_REPEAT_NONE = 0, /* Do not repeat this command */
KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
} kdb_repeat_t;

typedef int (*kdb_func_t)(int, const char **);

/* KDB return codes from a command or internal kdb function */
#define KDB_NOTFOUND (-1)
#define KDB_ARGCOUNT (-2)
#define KDB_BADWIDTH (-3)
#define KDB_BADRADIX (-4)
#define KDB_NOTENV (-5)
#define KDB_NOENVVALUE (-6)
#define KDB_NOTIMP (-7)
#define KDB_ENVFULL (-8)
#define KDB_ENVBUFFULL (-9)
#define KDB_TOOMANYBPT (-10)
#define KDB_TOOMANYDBREGS (-11)
#define KDB_DUPBPT (-12)
#define KDB_BPTNOTFOUND (-13)
#define KDB_BADMODE (-14)
#define KDB_BADINT (-15)
#define KDB_INVADDRFMT (-16)
#define KDB_BADREG (-17)
#define KDB_BADCPUNUM (-18)
#define KDB_BADLENGTH (-19)
#define KDB_NOBP (-20)
#define KDB_BADADDR (-21)

/*
* kdb_diemsg
*
@@ -104,10 +139,26 @@ int kdb_process_cpu(const struct task_struct *p)

/* kdb access to register set for stack dumping */
extern struct pt_regs *kdb_current_regs;
#ifdef CONFIG_KALLSYMS
extern const char *kdb_walk_kallsyms(loff_t *pos);
#else /* ! CONFIG_KALLSYMS */
static inline const char *kdb_walk_kallsyms(loff_t *pos)
{
return NULL;
}
#endif /* ! CONFIG_KALLSYMS */

/* Dynamic kdb shell command registration */
extern int kdb_register(char *, kdb_func_t, char *, char *, short);
extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
short, kdb_repeat_t);
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
#define kdb_printf(...)
#define kdb_init(x)
#define kdb_register(...)
#define kdb_register_repeat(...)
#define kdb_uregister(x)
#endif /* CONFIG_KGDB_KDB */
enum {
KDB_NOT_INITIALIZED,
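The kdb.h hunks above export what the "kdb: Allow kernel loadable modules to add kdb shell functions" commit needs: the command handler type, the KDB_* return codes and the kdb_register()/kdb_register_repeat()/kdb_unregister() entry points. A minimal sketch of a module using them follows; the "hello" command, its strings and the module name are made up for illustration, and it assumes a kernel built with CONFIG_KGDB_KDB.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kdb.h>

/* kdb passes the command name as argv[0] and the arguments after it,
 * so argc counts the arguments, not the words on the command line. */
static int kdb_hello_cmd(int argc, const char **argv)
{
	if (argc > 1)
		return KDB_ARGCOUNT;	/* wrong number of arguments */
	if (argc == 1)
		kdb_printf("Hello %s.\n", argv[1]);
	else
		kdb_printf("Hello world!\n");
	return 0;
}

static int __init kdb_hello_init(void)
{
	/* "hello" is a kdb shell command for as long as the module is loaded. */
	kdb_register_repeat("hello", kdb_hello_cmd, "[name]",
			    "Print a greeting from kdb", 0, KDB_REPEAT_NONE);
	return 0;
}

static void __exit kdb_hello_exit(void)
{
	kdb_unregister("hello");
}

module_init(kdb_hello_init);
module_exit(kdb_hello_exit);
MODULE_LICENSE("GPL");

Once the module is loaded the command can be invoked from the kdb prompt; unregistering it in module exit keeps the command table consistent when the module goes away.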
139 changes: 70 additions & 69 deletions kernel/debug/debug_core.c
@@ -47,6 +47,7 @@
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
@@ -109,13 +110,15 @@ static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
*/
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
* We use NR_CPUs not PERCPU, in case kgdb is used to debug early
* bootup code (which might not have percpu set up yet):
*/
static atomic_t passive_cpu_wait[NR_CPUS];
static atomic_t cpu_in_kgdb[NR_CPUS];
static atomic_t masters_in_kgdb;
static atomic_t slaves_in_kgdb;
static atomic_t kgdb_break_tasklet_var;
atomic_t kgdb_setting_breakpoint;

@@ -457,26 +460,32 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
return 1;
}

static void dbg_cpu_switch(int cpu, int next_cpu)
static void dbg_touch_watchdogs(void)
{
/* Mark the cpu we are switching away from as a slave when it
* holds the kgdb_active token. This must be done so that the
* that all the cpus wait in for the debug core will not enter
* again as the master. */
if (cpu == atomic_read(&kgdb_active)) {
kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER;
}
kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER;
touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
rcu_cpu_stall_reset();
}

static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
int exception_state)
{
unsigned long flags;
int sstep_tries = 100;
int error;
int i, cpu;
int cpu;
int trace_on = 0;
int online_cpus = num_online_cpus();

kgdb_info[ks->cpu].enter_kgdb++;
kgdb_info[ks->cpu].exception_state |= exception_state;

if (exception_state == DCPU_WANT_MASTER)
atomic_inc(&masters_in_kgdb);
else
atomic_inc(&slaves_in_kgdb);
kgdb_disable_hw_debug(ks->linux_regs);

acquirelock:
/*
* Interrupts will be restored by the 'trap return' code, except when
@@ -489,14 +498,15 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
kgdb_info[cpu].task = current;
kgdb_info[cpu].ret_state = 0;
kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
/*
* Make sure the above info reaches the primary CPU before
* our cpu_in_kgdb[] flag setting does:
*/
atomic_inc(&cpu_in_kgdb[cpu]);

if (exception_level == 1)
/* Make sure the above info reaches the primary CPU */
smp_mb();

if (exception_level == 1) {
if (raw_spin_trylock(&dbg_master_lock))
atomic_xchg(&kgdb_active, cpu);
goto cpu_master_loop;
}

/*
* CPU will loop if it is a slave or request to become a kgdb
@@ -508,10 +518,12 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
goto cpu_master_loop;
} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
if (raw_spin_trylock(&dbg_master_lock)) {
atomic_xchg(&kgdb_active, cpu);
break;
}
} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
if (!atomic_read(&passive_cpu_wait[cpu]))
if (!raw_spin_is_locked(&dbg_slave_lock))
goto return_normal;
} else {
return_normal:
@@ -522,9 +534,12 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
arch_kgdb_ops.correct_hw_break();
if (trace_on)
tracing_on();
atomic_dec(&cpu_in_kgdb[cpu]);
touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
kgdb_info[cpu].exception_state &=
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
kgdb_info[cpu].enter_kgdb--;
smp_mb__before_atomic_dec();
atomic_dec(&slaves_in_kgdb);
dbg_touch_watchdogs();
local_irq_restore(flags);
return 0;
}
@@ -541,8 +556,8 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
(kgdb_info[cpu].task &&
kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
atomic_set(&kgdb_active, -1);
touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
local_irq_restore(flags);

goto acquirelock;
@@ -563,16 +578,12 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
if (dbg_io_ops->pre_exception)
dbg_io_ops->pre_exception();

kgdb_disable_hw_debug(ks->linux_regs);

/*
* Get the passive CPU lock which will hold all the non-primary
* CPU in a spin state while the debugger is active
*/
if (!kgdb_single_step) {
for (i = 0; i < NR_CPUS; i++)
atomic_inc(&passive_cpu_wait[i]);
}
if (!kgdb_single_step)
raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
/* Signal the other CPUs to enter kgdb_wait() */
@@ -583,10 +594,9 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
/*
* Wait for the other CPUs to be notified and be waiting for us:
*/
for_each_online_cpu(i) {
while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i]))
cpu_relax();
}
while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
atomic_read(&slaves_in_kgdb)) != online_cpus)
cpu_relax();

/*
* At this point the primary processor is completely
@@ -615,7 +625,8 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
if (error == DBG_PASS_EVENT) {
dbg_kdb_mode = !dbg_kdb_mode;
} else if (error == DBG_SWITCH_CPU_EVENT) {
dbg_cpu_switch(cpu, dbg_switch_cpu);
kgdb_info[dbg_switch_cpu].exception_state |=
DCPU_NEXT_MASTER;
goto cpu_loop;
} else {
kgdb_info[cpu].ret_state = error;
@@ -627,24 +638,11 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
if (dbg_io_ops->post_exception)
dbg_io_ops->post_exception();

atomic_dec(&cpu_in_kgdb[ks->cpu]);

if (!kgdb_single_step) {
for (i = NR_CPUS-1; i >= 0; i--)
atomic_dec(&passive_cpu_wait[i]);
/*
* Wait till all the CPUs have quit from the debugger,
* but allow a CPU that hit an exception and is
* waiting to become the master to remain in the debug
* core.
*/
for_each_online_cpu(i) {
while (kgdb_do_roundup &&
atomic_read(&cpu_in_kgdb[i]) &&
!(kgdb_info[i].exception_state &
DCPU_WANT_MASTER))
cpu_relax();
}
raw_spin_unlock(&dbg_slave_lock);
/* Wait till all the CPUs have quit from the debugger. */
while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
cpu_relax();
}

kgdb_restore:
@@ -655,12 +653,20 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
else
kgdb_sstep_pid = 0;
}
if (arch_kgdb_ops.correct_hw_break)
arch_kgdb_ops.correct_hw_break();
if (trace_on)
tracing_on();

kgdb_info[cpu].exception_state &=
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
kgdb_info[cpu].enter_kgdb--;
smp_mb__before_atomic_dec();
atomic_dec(&masters_in_kgdb);
/* Free kgdb_active */
atomic_set(&kgdb_active, -1);
touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
local_irq_restore(flags);

return kgdb_info[cpu].ret_state;
Expand All @@ -678,7 +684,6 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
struct kgdb_state kgdb_var;
struct kgdb_state *ks = &kgdb_var;
int ret;

ks->cpu = raw_smp_processor_id();
ks->ex_vector = evector;
@@ -689,11 +694,10 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)

if (kgdb_reenter_check(ks))
return 0; /* Ouch, double exception ! */
kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
ret = kgdb_cpu_enter(ks, regs);
kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER |
DCPU_IS_SLAVE);
return ret;
if (kgdb_info[ks->cpu].enter_kgdb != 0)
return 0;

return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
}

int kgdb_nmicallback(int cpu, void *regs)
@@ -706,12 +710,9 @@ int kgdb_nmicallback(int cpu, void *regs)
ks->cpu = cpu;
ks->linux_regs = regs;

if (!atomic_read(&cpu_in_kgdb[cpu]) &&
atomic_read(&kgdb_active) != -1 &&
atomic_read(&kgdb_active) != cpu) {
kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
kgdb_cpu_enter(ks, regs);
kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
raw_spin_is_locked(&dbg_master_lock)) {
kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
return 0;
}
#endif
1 change: 1 addition & 0 deletions kernel/debug/debug_core.h
@@ -40,6 +40,7 @@ struct debuggerinfo_struct {
int exception_state;
int ret_state;
int irq_depth;
int enter_kgdb;
};

extern struct debuggerinfo_struct kgdb_info[];
