x86: MCE: Add raw_lock conversion again
Commit ea43164 ("x86/mce: Fix CMCI preemption bugs") breaks RT by
the completely unrelated conversion of cmci_discover_lock to a
regular (non-raw) spinlock.  This lock was annotated in commit
59d958d ("locking, x86: mce: Annotate cmci_discover_lock as raw")
with a proper explanation of why it must be raw.

The argument for converting the lock back to a regular spinlock was:

 - it does percpu ops without disabling preemption. Preemption is not
   disabled due to the mistaken use of a raw spinlock.

Which is complete nonsense.  A raw_spinlock disables preemption in
exactly the same way as a regular spinlock.  In mainline, spinlock maps
to raw_spinlock; on RT, spinlock becomes a "sleeping" lock.

On RT, raw_spinlock has exactly the same semantics as in mainline.  And
because this lock is taken in non-preemptible context, it must be raw
on RT.

Undo the locking brainfart.

Reported-by: Clark Williams <[email protected]>
Reported-by: Steven Rostedt <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
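
[Editor's illustration, not part of the commit: a minimal sketch of the pattern this
revert restores, using hypothetical names (example_lock, example_counter). A raw
spinlock taken with interrupts off stays a spinning, non-sleeping lock on both
mainline and PREEMPT_RT, so the per-CPU accesses under it remain non-preemptible,
which is exactly why cmci_discover_lock has to be raw.]

#include <linux/spinlock.h>
#include <linux/percpu.h>

/* Hypothetical lock and per-CPU data, for illustration only. */
static DEFINE_RAW_SPINLOCK(example_lock);
static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_non_preemptible_path(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables interrupts and preemption on
	 * both mainline and PREEMPT_RT, so it is valid in contexts that
	 * must not sleep (e.g. an interrupt handler).  An RT "sleeping"
	 * spinlock_t would not be.
	 */
	raw_spin_lock_irqsave(&example_lock, flags);
	this_cpu_inc(example_counter);	/* per-CPU op with preemption off */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}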
KAGA-KOKO authored and torvalds committed Aug 6, 2014
1 parent f4d3333 commit ed5c41d
Showing 1 changed file with 9 additions and 9 deletions.
arch/x86/kernel/cpu/mcheck/mce_intel.c (9 additions, 9 deletions):

--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
  * cmci_discover_lock protects against parallel discovery attempts
  * which could race against each other.
  */
-static DEFINE_SPINLOCK(cmci_discover_lock);
+static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
 
 #define CMCI_THRESHOLD		1
 #define CMCI_POLL_INTERVAL	(30 * HZ)
@@ -144,14 +144,14 @@ static void cmci_storm_disable_banks(void)
 	int bank;
 	u64 val;
 
-	spin_lock_irqsave(&cmci_discover_lock, flags);
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
 	owned = __get_cpu_var(mce_banks_owned);
 	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
 		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 		val &= ~MCI_CTL2_CMCI_EN;
 		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
 	}
-	spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static bool cmci_storm_detect(void)
@@ -211,7 +211,7 @@ static void cmci_discover(int banks)
 	int i;
 	int bios_wrong_thresh = 0;
 
-	spin_lock_irqsave(&cmci_discover_lock, flags);
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 		int bios_zero_thresh = 0;
@@ -266,7 +266,7 @@ static void cmci_discover(int banks)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
 		pr_info_once(
 			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
@@ -316,10 +316,10 @@ void cmci_clear(void)
 
 	if (!cmci_supported(&banks))
 		return;
-	spin_lock_irqsave(&cmci_discover_lock, flags);
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++)
 		__cmci_disable_bank(i);
-	spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void cmci_rediscover_work_func(void *arg)
@@ -360,9 +360,9 @@ void cmci_disable_bank(int bank)
 	if (!cmci_supported(&banks))
 		return;
 
-	spin_lock_irqsave(&cmci_discover_lock, flags);
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
 	__cmci_disable_bank(bank);
-	spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void intel_init_cmci(void)
