
[PATCH] lockdep: remove mutex deadlock checking code
With the lock validator we detect mutex deadlocks (and more), so the mutex
deadlock checking code is both redundant and slower. Remove it.

Signed-off-by: Ingo Molnar <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Ingo Molnar authored and Linus Torvalds committed Jul 3, 2006
1 parent 3659624 commit fb7e424
Showing 2 changed files with 0 additions and 324 deletions.
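
For context, a minimal sketch of the classic AB-BA ordering bug that both the removed checker and the lock validator catch. This is illustrative only; the demo_a/demo_b locks and the two functions are hypothetical, not part of this commit. The practical difference is that the removed code reported a deadlock only once tasks were actually blocked on each other's mutexes, whereas the validator complains the first time it sees the two locks taken in inconsistent order, whether or not a deadlock happens on that run.

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_a);		/* hypothetical locks, for illustration */
static DEFINE_MUTEX(demo_b);

static void thread_one(void)		/* takes A, then B */
{
	mutex_lock(&demo_a);
	mutex_lock(&demo_b);
	/* ... critical section ... */
	mutex_unlock(&demo_b);
	mutex_unlock(&demo_a);
}

static void thread_two(void)		/* takes B, then A: AB-BA inversion */
{
	mutex_lock(&demo_b);
	mutex_lock(&demo_a);
	/* ... critical section ... */
	mutex_unlock(&demo_a);
	mutex_unlock(&demo_b);
}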
316 changes: 0 additions & 316 deletions kernel/mutex-debug.c
@@ -51,321 +51,6 @@ LIST_HEAD(debug_mutex_held_locks);
 */
int debug_mutex_on = 1;

static void printk_task(struct task_struct *p)
{
	if (p)
		printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
	else
		printk("<none>");
}

static void printk_ti(struct thread_info *ti)
{
	if (ti)
		printk_task(ti->task);
	else
		printk("<none>");
}

static void printk_task_short(struct task_struct *p)
{
	if (p)
		printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
	else
		printk("<none>");
}

static void printk_lock(struct mutex *lock, int print_owner)
{
	printk(" [%p] {%s}\n", lock, lock->name);

	if (print_owner && lock->owner) {
		printk(".. held by: ");
		printk_ti(lock->owner);
		printk("\n");
	}
	if (lock->owner) {
		printk("... acquired at: ");
		print_symbol("%s\n", lock->acquire_ip);
	}
}

/*
 * printk locks held by a task:
 */
static void show_task_locks(struct task_struct *p)
{
	switch (p->state) {
	case TASK_RUNNING:		printk("R"); break;
	case TASK_INTERRUPTIBLE:	printk("S"); break;
	case TASK_UNINTERRUPTIBLE:	printk("D"); break;
	case TASK_STOPPED:		printk("T"); break;
	case EXIT_ZOMBIE:		printk("Z"); break;
	case EXIT_DEAD:			printk("X"); break;
	default:			printk("?"); break;
	}
	printk_task(p);
	if (p->blocked_on) {
		struct mutex *lock = p->blocked_on->lock;

		printk(" blocked on mutex:");
		printk_lock(lock, 1);
	} else
		printk(" (not blocked on mutex)\n");
}

/*
 * printk all locks held in the system (if filter == NULL),
 * or all locks belonging to a single task (if filter != NULL):
 */
void show_held_locks(struct task_struct *filter)
{
	struct list_head *curr, *cursor = NULL;
	struct mutex *lock;
	struct thread_info *t;
	unsigned long flags;
	int count = 0;

	if (filter) {
		printk("------------------------------\n");
		printk("| showing all locks held by: | (");
		printk_task_short(filter);
		printk("):\n");
		printk("------------------------------\n");
	} else {
		printk("---------------------------\n");
		printk("| showing all locks held: |\n");
		printk("---------------------------\n");
	}

	/*
	 * Play safe and acquire the global trace lock. We
	 * cannot printk with that lock held so we iterate
	 * very carefully:
	 */
next:
	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each(curr, &debug_mutex_held_locks) {
		if (cursor && curr != cursor)
			continue;
		lock = list_entry(curr, struct mutex, held_list);
		t = lock->owner;
		if (filter && (t != filter->thread_info))
			continue;
		count++;
		cursor = curr->next;
		debug_spin_unlock_restore(&debug_mutex_lock, flags);

		printk("\n#%03d: ", count);
		printk_lock(lock, filter ? 0 : 1);
		goto next;
	}
	debug_spin_unlock_restore(&debug_mutex_lock, flags);
	printk("\n");
}

void mutex_debug_show_all_locks(void)
{
	struct task_struct *g, *p;
	int count = 10;
	int unlock = 1;

	printk("\nShowing all blocking locks in the system:\n");

	/*
	 * Here we try to get the tasklist_lock as hard as possible,
	 * if not successful after 2 seconds we ignore it (but keep
	 * trying). This is to enable a debug printout even if a
	 * tasklist_lock-holding task deadlocks or crashes.
	 */
retry:
	if (!read_trylock(&tasklist_lock)) {
		if (count == 10)
			printk("hm, tasklist_lock locked, retrying... ");
		if (count) {
			count--;
			printk(" #%d", 10-count);
			mdelay(200);
			goto retry;
		}
		printk(" ignoring it.\n");
		unlock = 0;
	}
	if (count != 10)
		printk(" locked it.\n");

	do_each_thread(g, p) {
		show_task_locks(p);
		if (!unlock)
			if (read_trylock(&tasklist_lock))
				unlock = 1;
	} while_each_thread(g, p);

	printk("\n");
	show_held_locks(NULL);
	printk("=============================================\n\n");

	if (unlock)
		read_unlock(&tasklist_lock);
}

static void report_deadlock(struct task_struct *task, struct mutex *lock,
			    struct mutex *lockblk, unsigned long ip)
{
	printk("\n%s/%d is trying to acquire this lock:\n",
		current->comm, current->pid);
	printk_lock(lock, 1);
	printk("... trying at: ");
	print_symbol("%s\n", ip);
	show_held_locks(current);

	if (lockblk) {
		printk("but %s/%d is deadlocking current task %s/%d!\n\n",
			task->comm, task->pid, current->comm, current->pid);
		printk("\n%s/%d is blocked on this lock:\n",
			task->comm, task->pid);
		printk_lock(lockblk, 1);

		show_held_locks(task);

		printk("\n%s/%d's [blocked] stackdump:\n\n",
			task->comm, task->pid);
		show_stack(task, NULL);
	}

	printk("\n%s/%d's [current] stackdump:\n\n",
		current->comm, current->pid);
	dump_stack();
	mutex_debug_show_all_locks();
	printk("[ turning off deadlock detection. Please report this. ]\n\n");
	local_irq_disable();
}

/*
 * Recursively check for mutex deadlocks:
 */
static int check_deadlock(struct mutex *lock, int depth,
			  struct thread_info *ti, unsigned long ip)
{
	struct mutex *lockblk;
	struct task_struct *task;

	if (!debug_mutex_on)
		return 0;

	ti = lock->owner;
	if (!ti)
		return 0;

	task = ti->task;
	lockblk = NULL;
	if (task->blocked_on)
		lockblk = task->blocked_on->lock;

	/* Self-deadlock: */
	if (current == task) {
		DEBUG_OFF();
		if (depth)
			return 1;
		printk("\n==========================================\n");
		printk( "[ BUG: lock recursion deadlock detected! |\n");
		printk( "------------------------------------------\n");
		report_deadlock(task, lock, NULL, ip);
		return 0;
	}

	/* Ugh, something corrupted the lock data structure? */
	if (depth > 20) {
		DEBUG_OFF();
		printk("\n===========================================\n");
		printk( "[ BUG: infinite lock dependency detected!? |\n");
		printk( "-------------------------------------------\n");
		report_deadlock(task, lock, lockblk, ip);
		return 0;
	}

	/* Recursively check for dependencies: */
	if (lockblk && check_deadlock(lockblk, depth+1, ti, ip)) {
		printk("\n============================================\n");
		printk( "[ BUG: circular locking deadlock detected! ]\n");
		printk( "--------------------------------------------\n");
		report_deadlock(task, lock, lockblk, ip);
		return 0;
	}
	return 0;
}

/*
 * Called when a task exits, this function checks whether the
 * task is holding any locks, and reports the first one if so:
 */
void mutex_debug_check_no_locks_held(struct task_struct *task)
{
	struct list_head *curr, *next;
	struct thread_info *t;
	unsigned long flags;
	struct mutex *lock;

	if (!debug_mutex_on)
		return;

	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each_safe(curr, next, &debug_mutex_held_locks) {
		lock = list_entry(curr, struct mutex, held_list);
		t = lock->owner;
		if (t != task->thread_info)
			continue;
		list_del_init(curr);
		DEBUG_OFF();
		debug_spin_unlock_restore(&debug_mutex_lock, flags);

		printk("BUG: %s/%d, lock held at task exit time!\n",
			task->comm, task->pid);
		printk_lock(lock, 1);
		if (lock->owner != task->thread_info)
			printk("exiting task is not even the owner??\n");
		return;
	}
	debug_spin_unlock_restore(&debug_mutex_lock, flags);
}

/*
 * Called when kernel memory is freed (or unmapped), or if a mutex
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
{
	struct list_head *curr, *next;
	const void *to = from + len;
	unsigned long flags;
	struct mutex *lock;
	void *lock_addr;

	if (!debug_mutex_on)
		return;

	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each_safe(curr, next, &debug_mutex_held_locks) {
		lock = list_entry(curr, struct mutex, held_list);
		lock_addr = lock;
		if (lock_addr < from || lock_addr >= to)
			continue;
		list_del_init(curr);
		DEBUG_OFF();
		debug_spin_unlock_restore(&debug_mutex_lock, flags);

		printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
			current->comm, current->pid, lock, from, to);
		dump_stack();
		printk_lock(lock, 1);
		if (lock->owner != current_thread_info())
			printk("freeing task is not even the owner??\n");
		return;
	}
	debug_spin_unlock_restore(&debug_mutex_lock, flags);
}

/*
 * Must be called with lock->wait_lock held.
 */
@@ -405,7 +90,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			    struct thread_info *ti __IP_DECL__)
{
	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
	check_deadlock(lock, 0, ti, ip);
	/* Mark the current thread as blocked on the lock: */
	ti->task->blocked_on = waiter;
	waiter->lock = lock;
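
The hunk above removes the checker's only call site, on the slow path where a task is about to block in debug_mutex_add_waiter(). The simplest case that call guarded against is self-recursion, sketched below; the sketch is hypothetical and not part of the commit. The old code printed its "lock recursion deadlock detected" banner only once the task had already deadlocked on itself, while the validator flags the recursive acquisition as it happens.

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);	/* hypothetical lock, for illustration */

static void recursive_lock_bug(void)
{
	mutex_lock(&demo_lock);
	mutex_lock(&demo_lock);	/* A-A self-deadlock: blocking on a mutex
				 * this task already owns */
	mutex_unlock(&demo_lock);
	mutex_unlock(&demo_lock);
}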
8 changes: 0 additions & 8 deletions lib/Kconfig.debug
@@ -115,14 +115,6 @@ config DEBUG_PREEMPT
	  if kernel code uses it in a preemption-unsafe way. Also, the kernel
	  will detect preemption count underflows.

config DEBUG_MUTEXES
	bool "Mutex debugging, deadlock detection"
	default n
	depends on DEBUG_KERNEL
	help
	  This allows mutex semantics violations and mutex related deadlocks
	  (lockups) to be detected and reported automatically.

config DEBUG_RT_MUTEXES
	bool "RT Mutex debugging, deadlock detection"
	depends on DEBUG_KERNEL && RT_MUTEXES