kexec: use a mutex for locking rather than xchg()
Functionally the same, but more conventional.

Cc: Huang Ying <[email protected]>
Tested-by: Vivek Goyal <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
akpm00 authored and torvalds committed Aug 15, 2008
commit 8c5a1cf (parent 3122c33)
1 changed file: kernel/kexec.c (10 additions, 24 deletions)
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/kexec.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
@@ -924,19 +924,14 @@ static int kimage_load_segment(struct kimage *image,
  */
 struct kimage *kexec_image;
 struct kimage *kexec_crash_image;
-/*
- * A home grown binary mutex.
- * Nothing can wait so this mutex is safe to use
- * in interrupt context :)
- */
-static int kexec_lock;
+
+static DEFINE_MUTEX(kexec_mutex);
 
 asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
 				struct kexec_segment __user *segments,
 				unsigned long flags)
 {
 	struct kimage **dest_image, *image;
-	int locked;
 	int result;
 
 	/* We only trust the superuser with rebooting the system. */
@@ -972,8 +967,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
 	 *
 	 * KISS: always take the mutex.
 	 */
-	locked = xchg(&kexec_lock, 1);
-	if (locked)
+	if (!mutex_trylock(&kexec_mutex))
 		return -EBUSY;
 
 	dest_image = &kexec_image;
@@ -1015,8 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
 	image = xchg(dest_image, image);
 
 out:
-	locked = xchg(&kexec_lock, 0); /* Release the mutex */
-	BUG_ON(!locked);
+	mutex_unlock(&kexec_mutex);
 	kimage_free(image);
 
 	return result;
@@ -1063,28 +1056,23 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 
 void crash_kexec(struct pt_regs *regs)
 {
-	int locked;
-
-
-	/* Take the kexec_lock here to prevent sys_kexec_load
+	/* Take the kexec_mutex here to prevent sys_kexec_load
 	 * running on one cpu from replacing the crash kernel
 	 * we are using after a panic on a different cpu.
 	 *
 	 * If the crash kernel was not located in a fixed area
 	 * of memory the xchg(&kexec_crash_image) would be
 	 * sufficient.  But since I reuse the memory...
 	 */
-	locked = xchg(&kexec_lock, 1);
-	if (!locked) {
+	if (mutex_trylock(&kexec_mutex)) {
 		if (kexec_crash_image) {
 			struct pt_regs fixed_regs;
 			crash_setup_regs(&fixed_regs, regs);
 			crash_save_vmcoreinfo();
 			machine_crash_shutdown(&fixed_regs);
 			machine_kexec(kexec_crash_image);
 		}
-		locked = xchg(&kexec_lock, 0);
-		BUG_ON(!locked);
+		mutex_unlock(&kexec_mutex);
 	}
 }
 
@@ -1434,7 +1422,7 @@ int kernel_kexec(void)
 {
 	int error = 0;
 
-	if (xchg(&kexec_lock, 1))
+	if (!mutex_trylock(&kexec_mutex))
 		return -EBUSY;
 	if (!kexec_image) {
 		error = -EINVAL;
@@ -1498,8 +1486,6 @@ int kernel_kexec(void)
 #endif
 
  Unlock:
-	if (!xchg(&kexec_lock, 0))
-		BUG();
-
+	mutex_unlock(&kexec_mutex);
 	return error;
 }
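
For readers unfamiliar with the idiom this patch removes, below is a minimal userspace sketch (not part of the commit) contrasting the two locking styles. It emulates the kernel's xchg()-based binary lock with the GCC __atomic_exchange_n() builtin and the new kexec_mutex with a pthread mutex; fake_kexec_lock, fake_kexec_mutex and the do_load_*() helpers are invented names for illustration only. Note the inverted return conventions: the kernel's mutex_trylock() returns nonzero on success, which is why the patch writes "if (!mutex_trylock(...))", whereas pthread_mutex_trylock() returns 0 on success.

/* Userspace analogue of the two locking styles in this commit.
 * Build: cc -pthread demo.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Old style: a hand-rolled binary lock built on an atomic exchange,
 * mirroring xchg(&kexec_lock, 1) / xchg(&kexec_lock, 0). */
static int fake_kexec_lock;

static int old_style_trylock(void)
{
	/* Exchange returns the previous value: 0 means we took the lock. */
	return __atomic_exchange_n(&fake_kexec_lock, 1, __ATOMIC_ACQUIRE) == 0;
}

static void old_style_unlock(void)
{
	__atomic_exchange_n(&fake_kexec_lock, 0, __ATOMIC_RELEASE);
}

/* New style: a real mutex with trylock semantics, mirroring
 * mutex_trylock(&kexec_mutex) / mutex_unlock(&kexec_mutex). */
static pthread_mutex_t fake_kexec_mutex = PTHREAD_MUTEX_INITIALIZER;

static int do_load_old(void)
{
	if (!old_style_trylock())
		return -EBUSY;		/* someone else holds the lock */
	/* ... load the kexec image ... */
	old_style_unlock();
	return 0;
}

static int do_load_new(void)
{
	if (pthread_mutex_trylock(&fake_kexec_mutex))
		return -EBUSY;		/* trylock failed: already held */
	/* ... load the kexec image ... */
	pthread_mutex_unlock(&fake_kexec_mutex);
	return 0;
}

int main(void)
{
	printf("old style: %d, new style: %d\n", do_load_old(), do_load_new());
	return 0;
}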
