Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A set of fixes and updates for the locking code:

   - Prevent lockdep from updating irq state within its own code and
     thereby confusing itself.

   - Build fix for older GCCs which mistreat anonymous unions

   - Add a missing lockdep annotation in down_read_non_owner() which
     causes up_read_non_owner() to emit a lockdep splat

   - Remove the custom alpha dec_and_lock() implementation, which is
     incorrect in terms of ordering, and use the generic one.

  The remaining two commits are not strictly fixes. They provide irqsave
  variants of atomic_dec_and_lock() and refcount_dec_and_lock(). These
  are required to merge the relevant updates and cleanups into different
  maintainer trees for 4.19, so routing them into mainline without
  actual users is the sanest approach.

  They should have been in -rc1, but last weekend I took the liberty to
  just avoid computers in order to regain some mental sanity"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/qspinlock: Fix build for anonymous union in older GCC compilers
  locking/lockdep: Do not record IRQ state within lockdep code
  locking/rwsem: Fix up_read_non_owner() warning with DEBUG_RWSEMS
  locking/refcounts: Implement refcount_dec_and_lock_irqsave()
  atomic: Add irqsave variant of atomic_dec_and_lock()
  alpha: Remove custom dec_and_lock() implementation
torvalds committed Jun 24, 2018
2 parents a43de48 + 6cc65be commit 2da2ca2
Showing 11 changed files with 61 additions and 64 deletions.
5 changes: 0 additions & 5 deletions arch/alpha/Kconfig
@@ -555,11 +555,6 @@ config SMP

If you don't know what to do here, say N.

-config HAVE_DEC_LOCK
-	bool
-	depends on SMP
-	default y
-
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
2 changes: 0 additions & 2 deletions arch/alpha/lib/Makefile
@@ -35,8 +35,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
callback_srm.o srm_puts.o srm_printk.o \
fls.o

-lib-$(CONFIG_SMP) += dec_and_lock.o
-
# The division routines are built from single source, with different defines.
AFLAGS___divqu.o = -DDIV
AFLAGS___remqu.o = -DREM
44 changes: 0 additions & 44 deletions arch/alpha/lib/dec_and_lock.c

This file was deleted.
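With its custom version gone, alpha falls back to the generic lib/dec_and_lock.c helper. For reference (this is pre-existing code, not part of this diff), the generic implementation reads essentially as follows:

/* Generic _atomic_dec_and_lock(): shown for reference only; this is the
 * implementation alpha now uses instead of its own variant. */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}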

2 changes: 1 addition & 1 deletion include/asm-generic/qspinlock_types.h
@@ -63,7 +63,7 @@ typedef struct qspinlock {
/*
* Initializier
*/
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ .val = ATOMIC_INIT(0) }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }

/*
* Bitfields in the atomic value:
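A minimal standalone illustration of the compiler issue being fixed (the type and variable names below are hypothetical, not kernel code): struct qspinlock now begins with an anonymous union, and older GCC releases reject a designated initializer for a member of that union unless the union's own braces are written out, which is what the extra pair of braces provides.

/* Compiles with old and new GCC; older GCCs reject the single-brace form
 * "{ .val = 0 }" because .val lives inside an anonymous union. */
struct qspinlock_like {
	union {
		int val;
		struct {
			unsigned char locked;
			unsigned char pending;
		};
	};
};

static struct qspinlock_like ok = { { .val = 0 } };	/* outer: struct, inner: union */

int main(void)
{
	return ok.val;
}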
4 changes: 3 additions & 1 deletion include/linux/refcount.h
@@ -98,5 +98,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);

+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+							spinlock_t *lock,
+							unsigned long *flags);
#endif /* _LINUX_REFCOUNT_H */
5 changes: 5 additions & 0 deletions include/linux/spinlock.h
@@ -427,6 +427,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+					unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp);
12 changes: 6 additions & 6 deletions kernel/locking/lockdep.c
@@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.parent = NULL;
this.class = class;

-	local_irq_save(flags);
+	raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);

return ret;
}
@@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.parent = NULL;
this.class = class;

-	local_irq_save(flags);
+	raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);

return ret;
}
@@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
if (unlikely(!debug_locks))
return;

-	local_irq_save(flags);
+	raw_local_irq_save(flags);
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;

@@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);

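Context for the switch to the raw_* variants: with CONFIG_TRACE_IRQFLAGS enabled, the plain local_irq_save()/local_irq_restore() are instrumented and call back into lockdep's own IRQ-state tracking, so using them inside lockdep records IRQ state from within lockdep itself. A simplified sketch of the traced wrapper (close to include/linux/irqflags.h, shown only for illustration):

/* Simplified sketch of the traced variant when CONFIG_TRACE_IRQFLAGS=y.
 * trace_hardirqs_off() re-enters lockdep's IRQ-state tracking, which is
 * exactly what lockdep-internal code must avoid; raw_local_irq_save()
 * flips the hardware flag without the tracing hook. */
#define local_irq_save(flags)			\
	do {					\
		raw_local_irq_save(flags);	\
		trace_hardirqs_off();		\
	} while (0)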
1 change: 1 addition & 0 deletions kernel/locking/rwsem.c
@@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
might_sleep();

__down_read(sem);
+	rwsem_set_reader_owned(sem);
}

EXPORT_SYMBOL(down_read_non_owner);
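A hypothetical sketch of the non-owner pairing this annotation serves (worker_start/worker_finish are illustrative names, not from the commit): the reader lock may be released by a different context than the one that acquired it, and with CONFIG_DEBUG_RWSEMS the release path checks the reader-owned marker that rwsem_set_reader_owned() now records on acquisition.

#include <linux/rwsem.h>

/* Acquire on behalf of another context. */
static void worker_start(struct rw_semaphore *sem)
{
	down_read_non_owner(sem);	/* now also marks the rwsem as reader-owned */
	/* hand the protected object off to another context ... */
}

/* Release from whichever context finishes the work. */
static void worker_finish(struct rw_semaphore *sem)
{
	up_read_non_owner(sem);		/* DEBUG_RWSEMS checks the reader-owned marker */
}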
6 changes: 1 addition & 5 deletions lib/Makefile
@@ -23,7 +23,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o chacha20.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o seq_buf.o siphash.o \
+	 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
nmi_backtrace.o nodemask.o win_minmax.o

lib-$(CONFIG_PRINTK) += dump_stack.o
@@ -95,10 +95,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o

-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
-lib-y += dec_and_lock.o
-endif
-
obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
16 changes: 16 additions & 0 deletions lib/dec_and_lock.c
@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
}

EXPORT_SYMBOL(_atomic_dec_and_lock);

+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+				 unsigned long *flags)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	spin_lock_irqsave(lock, *flags);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	spin_unlock_irqrestore(lock, *flags);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
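A hypothetical caller sketch for the new helper (struct my_obj and my_obj_put() are made-up names, not from this commit): the typical use is a put path where the final decrement must unlink the object under a lock that is also taken from interrupt context, so the lock is acquired with interrupts disabled and the saved flags are handed back to the caller.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	atomic_t refcnt;
	struct list_head node;
};

static void my_obj_put(struct my_obj *obj, spinlock_t *list_lock)
{
	unsigned long flags;

	/* Note: the macro takes "flags" by name and passes &(flags) internally. */
	if (!atomic_dec_and_lock_irqsave(&obj->refcnt, list_lock, flags))
		return;				/* not the last reference */

	list_del(&obj->node);			/* last reference: unlink under the lock */
	spin_unlock_irqrestore(list_lock, flags);
	kfree(obj);
}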
28 changes: 28 additions & 0 deletions lib/refcount.c
@@ -350,3 +350,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
}
EXPORT_SYMBOL(refcount_dec_and_lock);

+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ *                                 interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ-flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+				   unsigned long *flags)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	spin_lock_irqsave(lock, *flags);
+	if (!refcount_dec_and_test(r)) {
+		spin_unlock_irqrestore(lock, *flags);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
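A similar hypothetical sketch for the refcount variant (struct my_ref_obj and my_ref_put() are illustrative names). Unlike the atomic_dec_and_lock_irqsave() macro above, this is a plain function and takes the flags by pointer.

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_ref_obj {
	refcount_t ref;
	struct list_head node;
};

static void my_ref_put(struct my_ref_obj *obj, spinlock_t *list_lock)
{
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&obj->ref, list_lock, &flags))
		return;				/* other references remain */

	list_del(&obj->node);			/* final reference: tear down under the lock */
	spin_unlock_irqrestore(list_lock, flags);
	kfree(obj);
}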
